prithivMLmods committed
Commit 7f1e816 · verified · 1 Parent(s): 2e935f0

Update app.py

Files changed (1)
app.py +5 -5
app.py CHANGED
@@ -116,10 +116,10 @@ moondream = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer_md = AutoTokenizer.from_pretrained(MODEL_ID_MD, revision=REVISION_MD)
 
-# --- dse-qwen2-2b-mrl-v1 ---
-MODEL_ID_N = "MrLight/dse-qwen2-2b-mrl-v1"
+# --- Qwen2.5-VL-3B-Abliterated-Caption-it ---
+MODEL_ID_N = "prithivMLmods/Qwen2.5-VL-3B-Abliterated-Caption-it"
 processor_n = AutoProcessor.from_pretrained(MODEL_ID_N, trust_remote_code=True)
-model_n = Qwen2VLForConditionalGeneration.from_pretrained(
+model_n = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_N, trust_remote_code=True, torch_dtype=torch.float16
 ).to(device).eval()
 
@@ -231,7 +231,7 @@ def process_document_stream(
     elif model_name == "VLAA-Thinker-Qwen2VL-2B(reason)": processor, model = processor_i, model_i
     elif model_name == "Nanonets-OCR-s(ocr)": processor, model = processor_a, model_a
     elif model_name == "Megalodon-OCR-Sync-0713(ocr)": processor, model = processor_x, model_x
-    elif model_name == "dse-qwen2-2b-mrl-v1(dse)": processor, model = processor_n, model_n
+    elif model_name == "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)": processor, model = processor_n, model_n
     else:
         yield "Invalid model selected.", ""
         return
@@ -289,7 +289,7 @@ def create_gradio_interface():
         with gr.Column(scale=1):
             model_choice = gr.Dropdown(
                 choices=["LFM2-VL-450M(fast)", "LFM2-VL-1.6B(fast)", "SmolVLM-Instruct-250M(smol)", "Moondream2(vision)", "ShotVL-3B(cinematic)", "Megalodon-OCR-Sync-0713(ocr)",
-                         "VLAA-Thinker-Qwen2VL-2B(reason)", "MonkeyOCR-pro-1.2B(ocr)", "dse-qwen2-2b-mrl-v1(dse)", "Nanonets-OCR-s(ocr)"],
+                         "VLAA-Thinker-Qwen2VL-2B(reason)", "MonkeyOCR-pro-1.2B(ocr)", "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)", "Nanonets-OCR-s(ocr)"],
                 label="Select Model", value= "LFM2-VL-450M(fast)"
             )
             prompt_input = gr.Textbox(label="Query Input", placeholder="✦︎ Enter your query", value="Describe the image!")
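
For reference, the block below is a minimal standalone sketch (not part of the commit) of how the swapped-in model from the first hunk could be loaded and queried outside the Gradio app. It assumes a transformers release that ships Qwen2_5_VLForConditionalGeneration (4.49 or later), a CUDA device when available, and a hypothetical local image file example.jpg; the prompt mirrors the app's default query.

import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_ID_N = "prithivMLmods/Qwen2.5-VL-3B-Abliterated-Caption-it"

# Same loading pattern as the new hunk: fp16 weights, eval mode, single device.
processor_n = AutoProcessor.from_pretrained(MODEL_ID_N, trust_remote_code=True)
model_n = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_N, trust_remote_code=True, torch_dtype=torch.float16
).to(device).eval()

image = Image.open("example.jpg")  # hypothetical input image, not from the commit
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe the image!"},  # the app's default query
    ],
}]

# Build the chat-formatted prompt, pair it with the image, and generate a caption.
prompt = processor_n.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor_n(text=[prompt], images=[image], padding=True, return_tensors="pt").to(device)
with torch.inference_mode():
    output_ids = model_n.generate(**inputs, max_new_tokens=256)

# Drop the prompt tokens so only the newly generated text is decoded.
generated = output_ids[:, inputs["input_ids"].shape[1]:]
print(processor_n.batch_decode(generated, skip_special_tokens=True)[0])

Note that the dropdown label added in the third hunk, "Qwen2.5-VL-3B-Abliterated-Caption-it(caption)", must match the string tested in the elif chain of the second hunk exactly; otherwise process_document_stream falls through to the "Invalid model selected." branch.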