Files changed (4) hide show
  1. 3.gif +0 -0
  2. README.md +1 -1
  3. app.py +4 -6
  4. requirements.txt +7 -7
3.gif ADDED
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: πŸ“š
4
  colorFrom: red
5
  colorTo: purple
6
  sdk: gradio
7
- sdk_version: 4.36.1
8
  python_version: 3.9.13
9
  app_file: app.py
10
  pinned: false
 
4
  colorFrom: red
5
  colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 3.1.0
8
  python_version: 3.9.13
9
  app_file: app.py
10
  pinned: false
app.py CHANGED
@@ -37,16 +37,14 @@ class App:
37
  def _get_model(self, name):
38
  if name in self._model_cache:
39
  return self._model_cache[name]
40
- model = torch.hub.load('baudm/parseq', name, pretrained=True, trust_repo=True).eval()
 
41
  self._model_cache[name] = model
42
  return model
43
 
44
- @torch.inference_mode()
45
  def __call__(self, model_name, image):
46
  if image is None:
47
  return '', []
48
- if isinstance(image, dict): # Extract image from ImageEditor output
49
- image = image['composite']
50
  model = self._get_model(model_name)
51
  image = self._preprocess(image.convert('RGB')).unsqueeze(0)
52
  # Greedy decoding
@@ -81,11 +79,11 @@ def main():
81
  model_name = gr.Radio(app.models, value=app.models[0], label='The STR model to use')
82
  with gr.Tabs():
83
  with gr.TabItem('Image Upload'):
84
- image_upload = gr.Image(type='pil', sources=['upload'], label='Image')
85
  gr.Examples(glob.glob('demo_images/*.*'), inputs=image_upload)
86
  read_upload = gr.Button('Read Text')
87
  with gr.TabItem('Canvas Sketch'):
88
- image_canvas = gr.ImageEditor(type='pil', sources=[], label='Sketch', image_mode='RGB', layers=False, canvas_size=(768, 192))
89
  read_canvas = gr.Button('Read Text')
90
 
91
  output = gr.Textbox(max_lines=1, label='Model output')
 
37
  def _get_model(self, name):
38
  if name in self._model_cache:
39
  return self._model_cache[name]
40
+ model = torch.hub.load('baudm/parseq', name, pretrained=True).eval()
41
+ model.freeze()
42
  self._model_cache[name] = model
43
  return model
44
 
 
45
  def __call__(self, model_name, image):
46
  if image is None:
47
  return '', []
 
 
48
  model = self._get_model(model_name)
49
  image = self._preprocess(image.convert('RGB')).unsqueeze(0)
50
  # Greedy decoding
 
79
  model_name = gr.Radio(app.models, value=app.models[0], label='The STR model to use')
80
  with gr.Tabs():
81
  with gr.TabItem('Image Upload'):
82
+ image_upload = gr.Image(type='pil', source='upload', label='Image')
83
  gr.Examples(glob.glob('demo_images/*.*'), inputs=image_upload)
84
  read_upload = gr.Button('Read Text')
85
  with gr.TabItem('Canvas Sketch'):
86
+ image_canvas = gr.Image(type='pil', source='canvas', label='Sketch')
87
  read_canvas = gr.Button('Read Text')
88
 
89
  output = gr.Textbox(max_lines=1, label='Model output')
requirements.txt CHANGED
@@ -1,8 +1,8 @@
1
- --extra-index-url https://download.pytorch.org/whl/cpu
2
-
3
- gradio ~=4.36.0
4
- torch >=2.0.0
5
- torchvision >=0.15.0
6
- pytorch-lightning ~=2.2.0
7
- timm ~=0.9.16
8
  nltk
 
 
1
+ Gradio
2
+ torch
3
+ torchtext
4
+ torchvision
5
+ torchmetrics==0.6.2
6
+ timm==0.4.12
 
7
  nltk
8
+ git+https://github.com/baudm/parseq.git