prithivMLmods committed · Commit 37b11c9 · verified · 1 Parent(s): d301ee7

Update app.py

Files changed (1): app.py (+26 −10)
app.py CHANGED
@@ -93,6 +93,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
                    repetition_penalty: float = 1.2):
     """
     Generates responses using the selected model for image input.
+    Yields raw text and Markdown-formatted text.
     """
     if model_name == "Cosmos-Reason1-7B":
         processor = processor_m
@@ -107,11 +108,11 @@ def generate_image(model_name: str, text: str, image: Image.Image,
         processor = processor_v
         model = model_v
     else:
-        yield "Invalid model selected."
+        yield "Invalid model selected.", "Invalid model selected."
         return
 
     if image is None:
-        yield "Please upload an image."
+        yield "Please upload an image.", "Please upload an image."
         return
 
     messages = [{
@@ -138,7 +139,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     for new_text in streamer:
         buffer += new_text
         time.sleep(0.01)
-        yield buffer
+        yield buffer, buffer
 
 @spaces.GPU
 def generate_video(model_name: str, text: str, video_path: str,
@@ -149,6 +150,7 @@ def generate_video(model_name: str, text: str, video_path: str,
                    repetition_penalty: float = 1.2):
     """
     Generates responses using the selected model for video input.
+    Yields raw text and Markdown-formatted text.
     """
     if model_name == "Cosmos-Reason1-7B":
         processor = processor_m
@@ -163,11 +165,11 @@ def generate_video(model_name: str, text: str, video_path: str,
         processor = processor_v
         model = model_v
     else:
-        yield "Invalid model selected."
+        yield "Invalid model selected.", "Invalid model selected."
         return
 
     if video_path is None:
-        yield "Please upload a video."
+        yield "Please upload a video.", "Please upload a video."
         return
 
     frames = downsample_video(video_path)
@@ -205,7 +207,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     for new_text in streamer:
         buffer += new_text
         time.sleep(0.01)
-        yield buffer
+        yield buffer, buffer
 
 # Define examples for image and video inference
 image_examples = [
@@ -226,6 +228,11 @@ css = """
 .submit-btn:hover {
     background-color: #3498db !important;
 }
+.canvas-output {
+    border: 2px solid #4682B4;
+    border-radius: 10px;
+    padding: 20px;
+}
 """
 
 # Create the Gradio Interface
@@ -250,34 +257,43 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
                     examples=video_examples,
                     inputs=[video_query, video_upload]
                 )
+
             with gr.Accordion("Advanced options", open=False):
                 max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
                 temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
                 top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
                 top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
                 repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
+
         with gr.Column():
-            output = gr.Textbox(label="Output", interactive=False, lines=2, scale=2)
+            with gr.Column(elem_classes="canvas-output"):
+                gr.Markdown("## Result.Md")
+                raw_output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=2)
+
+                with gr.Accordion("Formatted Result (Result.md)", open=False):
+                    markdown_output = gr.Markdown()
+
             model_choice = gr.Radio(
                 choices=["Cosmos-Reason1-7B", "docscopeOCR-7B-050425-exp", "Captioner-7B", "visionOCR-3B"],
                 label="Select Model",
                 value="Cosmos-Reason1-7B"
             )
-
            gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/DocScope-R1/discussions)")
            gr.Markdown("> [Cosmos-Reason1-7B](https://huggingface.co/nvidia/Cosmos-Reason1-7B): understand physical common sense and generate appropriate embodied decisions.")
            gr.Markdown("> [docscopeOCR-7B-050425-exp](https://huggingface.co/prithivMLmods/docscopeOCR-7B-050425-exp): optimized for document-level optical character recognition, long-context vision-language understanding.")
            gr.Markdown("> [Captioner-Relaxed-7B](https://huggingface.co/Ertugrul/Qwen2.5-VL-7B-Captioner-Relaxed): build with hand-curated dataset for text-to-image models, providing significantly more detailed descriptions or captions of given images.")
+            gr.Markdown("> [visionOCR-3B](https://huggingface.co/prithivMLmods/visionOCR-3B-061125): visionocr-3b-061125 model is a fine-tuned version of qwen2.5-vl-3b-instruct, optimized for document-level optical character recognition (ocr), long-context vision-language understanding.")
+            gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
     image_submit.click(
        fn=generate_image,
        inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
-        outputs=output
+        outputs=[raw_output, markdown_output]
    )
    video_submit.click(
        fn=generate_video,
        inputs=[model_choice, video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
-        outputs=output
+        outputs=[raw_output, markdown_output]
    )
 
 if __name__ == "__main__":
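
For context on the pattern this commit adopts: when a Gradio event handler is a generator, each yield must supply one value per component listed in outputs. Switching from a single Textbox to [raw_output, markdown_output] therefore forces every `yield buffer` to become `yield buffer, buffer`, including the early-exit error messages. A minimal, self-contained sketch of the same two-output streaming wiring (component and function names here are illustrative, not taken from app.py):

import time
import gradio as gr

def stream_twice(prompt: str):
    # Generator event handler: each yield is a tuple whose items map
    # positionally onto outputs=[raw_box, md_view] below.
    buffer = ""
    for token in prompt.split():
        buffer += token + " "
        time.sleep(0.05)  # pacing between partial updates, as in the app's time.sleep(0.01)
        yield buffer, buffer  # same text shown as a raw stream and as Markdown source

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    raw_box = gr.Textbox(label="Raw Output Stream", interactive=False)
    with gr.Accordion("Formatted Result", open=False):
        md_view = gr.Markdown()
    gr.Button("Run").click(fn=stream_twice, inputs=prompt,
                           outputs=[raw_box, md_view])

if __name__ == "__main__":
    demo.launch()

Relatedly, the .canvas-output rule added to the css string takes effect because gr.Column(elem_classes="canvas-output") attaches that class to the column's rendered HTML container.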