root committed
Commit 7ba23b1 · 1 Parent(s): a8e6b52

fix: update params for API


Signed-off-by: root <[email protected]>

Files changed (1)
  1. app.py +37 -28
app.py CHANGED
@@ -7,31 +7,37 @@ import html
 from transformers import DonutProcessor, VisionEncoderDecoderModel


-global model, processor, device
-
+global model, loaded_revision, processor, device
+model = None
+previous_revision=None
+processor=None
+device=None
+loaded_revision=None

 def load_model(pretrained_revision: str = 'main'):
-    global model, processor, device
+    global model, loaded_revision, processor, device
     pretrained_repo_name = 'ivelin/donut-refexp-click'
     # revision can be git commit hash, branch or tag
     # use 'main' for latest revision
-    print(f"Loading model checkpoint from repo: {pretrained_repo_name}, revision: {pretrained_revision}")
-    processor = DonutProcessor.from_pretrained(
+    print(f"Loading model checkpoint from repo: {pretrained_repo_name}, revision: {pretrained_revision}")
+    if processor is None or loaded_revision is None or loaded_revision != pretrained_revision:
+        loaded_revision=pretrained_revision
+        processor = DonutProcessor.from_pretrained(
         pretrained_repo_name, revision=pretrained_revision, use_auth_token="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")
-    processor.image_processor.do_align_long_axis = False
-    # do not manipulate image size and position
-    processor.image_processor.do_resize = False
-    processor.image_processor.do_thumbnail = False
-    processor.image_processor.do_pad = False
-    # processor.image_processor.do_rescale = False
-    processor.image_processor.do_normalize = True
-    print(f'processor image size: {processor.image_processor.size}')
-    model = VisionEncoderDecoderModel.from_pretrained(
+        processor.image_processor.do_align_long_axis = False
+        # do not manipulate image size and position
+        processor.image_processor.do_resize = False
+        processor.image_processor.do_thumbnail = False
+        processor.image_processor.do_pad = False
+        # processor.image_processor.do_rescale = False
+        processor.image_processor.do_normalize = True
+        print(f'processor image size: {processor.image_processor.size}')
+        model = VisionEncoderDecoderModel.from_pretrained(
         pretrained_repo_name, use_auth_token="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", revision=pretrained_revision)
-
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    model.to(device)
-
+        print(f'model checkpoint loaded')
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        model.to(device)
+

 def prepare_image_for_encoder(image=None, output_image_size=None):
     """
@@ -89,7 +95,7 @@ def translate_point_coords_from_out_to_in(point=None, input_image_size=None, out
         f"translated point={point}, resized_image_size: {resized_width, resized_height}")


-def process_refexp(image: Image, prompt: str, model_revision: str = 'main'):
+def process_refexp(image: Image, prompt: str, model_revision: str = 'main', return_annotated_image: bool = False):

     print(f"(image, prompt): {image}, {prompt}")

@@ -182,13 +188,16 @@ def process_refexp(image: Image, prompt: str, model_revision: str = 'main'):
     print(
         f"to image pixel values: x, y: {x, y}")

-    # draw center point circle
-    img1 = ImageDraw.Draw(image)
-
-    r = 30
-    shape = [(x-r, y-r), (x+r, y+r)]
-    img1.ellipse(shape, outline="green", width=20)
-    img1.ellipse(shape, outline="white", width=10)
+    if return_annotated_image:
+        # draw center point circle
+        img1 = ImageDraw.Draw(image)
+        r = 30
+        shape = [(x-r, y-r), (x+r, y+r)]
+        img1.ellipse(shape, outline="green", width=20)
+        img1.ellipse(shape, outline="white", width=10)
+    else:
+        # do not return image if its an API call to save bandwidth
+        image = None

     return image, center_point

@@ -221,7 +230,7 @@ examples = [["example_1.jpg", "select the setting icon from top right corner", "
     ]

 demo = gr.Interface(fn=process_refexp,
-                    inputs=[gr.Image(type="pil"), "text", "text"],
+                    inputs=[gr.Image(type="pil"), "text", "text", gr.Checkbox(value=True, label="Return Annotated Image", visible=False)],
                     outputs=[gr.Image(type="pil"), "json"],
                     title=title,
                     description=description,
@@ -231,4 +240,4 @@ demo = gr.Interface(fn=process_refexp,
                     cache_examples=False
                     )

-demo.launch() # share=True when running in a Jupyter Notebook
+demo.launch(server_name="0.0.0.0") # share=True when running in a Jupyter Notebook
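
Note: with this change, API callers can pass False in the new fourth input slot and receive only the JSON center point, skipping the annotated image. Below is a minimal sketch of such a call; it assumes the Space exposes Gradio 3.x's standard /run/predict JSON route, and the Space URL is a hypothetical placeholder, not taken from this diff:

    # Sketch of an API call exercising return_annotated_image=False.
    # Assumptions (not confirmed by this commit): the app is served by a
    # Gradio version that provides the /run/predict JSON endpoint, and
    # SPACE_URL points at the deployed Space.
    import base64
    import requests

    SPACE_URL = "https://example-space.hf.space"  # hypothetical; replace with the real Space URL

    # One payload entry per input component, in Interface order:
    # image (base64 data URL), prompt, model_revision, return_annotated_image
    with open("example_1.jpg", "rb") as f:
        image_b64 = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

    payload = {"data": [image_b64,
                        "select the setting icon from top right corner",
                        "main",
                        False]}  # False: skip the annotated image to save bandwidth

    resp = requests.post(f"{SPACE_URL}/run/predict", json=payload, timeout=120)
    resp.raise_for_status()
    annotated_image, center_point = resp.json()["data"]
    print(center_point)             # click coordinates predicted by the model
    assert annotated_image is None  # image slot is empty when return_annotated_image=False

The hidden gr.Checkbox(value=True, visible=False) keeps browser behavior unchanged (the UI always draws the circle) while letting programmatic callers opt out via the fourth slot, and demo.launch(server_name="0.0.0.0") binds to all interfaces so the app is reachable from outside the Space container.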