alessandro trinca tornidor committed on
Commit
9d7a440
·
1 Parent(s): 01780f0

[refactor] start refactor app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -22
app.py CHANGED
@@ -2,6 +2,7 @@ import argparse
2
  import os
3
  import re
4
  import sys
 
5
 
6
  import nh3
7
  import cv2
@@ -9,7 +10,6 @@ import gradio as gr
9
  import numpy as np
10
  import torch
11
  import torch.nn.functional as F
12
- from PIL import Image
13
  from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
14
 
15
  from model.LISA import LISAForCausalLM
@@ -334,25 +334,40 @@ def inference(input_str, input_image):
334
  return output_image, output_str
335
 
336
 
337
- demo = gr.Interface(
338
- inference,
339
- inputs=[
340
- gr.Textbox(lines=1, placeholder=None, label="Text Instruction"),
341
- gr.Image(type="filepath", label="Input Image"),
342
- ],
343
- outputs=[
344
- gr.Image(type="pil", label="Segmentation Output"),
345
- gr.Textbox(lines=1, placeholder=None, label="Text Output"),
346
- ],
347
- title=title,
348
- description=description,
349
- article=article,
350
- examples=examples,
351
- allow_flagging="auto",
352
- )
353
 
354
- demo.queue()
355
- demo.launch(
356
- share=False,
357
- server_name="0.0.0.0"
358
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import os
3
  import re
4
  import sys
5
+ from typing import Callable
6
 
7
  import nh3
8
  import cv2
 
10
  import numpy as np
11
  import torch
12
  import torch.nn.functional as F
 
13
  from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
14
 
15
  from model.LISA import LISAForCausalLM
 
334
  return output_image, output_str
335
 
336
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
337
 
338
def server_runner(
    fn_inference: Callable,
    debug: bool = False,
    server_name: str = "0.0.0.0"
):
    """Wrap *fn_inference* in a Gradio Interface and serve it.

    Args:
        fn_inference: callable taking (text instruction, image filepath)
            and returning (segmentation image, text output), matching the
            widget lists below.
        debug: forwarded to ``launch()`` to toggle Gradio debug mode.
        server_name: interface to bind; "0.0.0.0" listens on all interfaces.
    """
    # Widget lists mirror fn_inference's (str, filepath) -> (PIL image, str) contract.
    input_widgets = [
        gr.Textbox(lines=1, placeholder=None, label="Text Instruction"),
        gr.Image(type="filepath", label="Input Image")
    ]
    output_widgets = [
        gr.Image(type="pil", label="Segmentation Output"),
        gr.Textbox(lines=1, placeholder=None, label="Text Output"),
    ]
    # title/description/article/examples are module-level globals of app.py.
    app = gr.Interface(
        fn_inference,
        inputs=input_widgets,
        outputs=output_widgets,
        title=title,
        description=description,
        article=article,
        examples=examples,
        allow_flagging="auto",
    )
    # Queue requests before launching the (non-shared) server.
    app.queue()
    app.launch(share=False, debug=debug, server_name=server_name)
366
+
367
+
368
if __name__ == '__main__':
    # Script entry point: serve the module-level `inference` function,
    # in debug mode, bound to all network interfaces.
    server_runner(inference, debug=True, server_name="0.0.0.0")