programmnix-askui committed
Commit f659d73 · 1 Parent(s): 9a43651

Add deepseek

Files changed (1)
  1. app.py  +5 -3
app.py CHANGED
@@ -18,10 +18,12 @@ from transformers import AutoModelForCausalLM
 
 models = {
     #"OS-Copilot/OS-Atlas-Base-7B": Qwen2VLForConditionalGeneration.from_pretrained("OS-Copilot/OS-Atlas-Base-7B", torch_dtype="auto", device_map="auto"),
+    "deepseek-ai/deepseek-vl2-tiny": AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-vl2-tiny", trust_remote_code=True)
 }
 
 processors = {
     #"OS-Copilot/OS-Atlas-Base-7B": AutoProcessor.from_pretrained("OS-Copilot/OS-Atlas-Base-7B")
+    "deepseek-ai/deepseek-vl2-tiny": DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2-tiny",)
 }
 
 
@@ -56,7 +58,7 @@ def rescale_bounding_boxes(bounding_boxes, original_width, original_height, scal
     return rescaled_boxes
 
 
-def deepseek():
+def deepseek(image):
     print("helloe!!!!")
     # specify the path to the model
     model_path = "deepseek-ai/deepseek-vl2-tiny"
@@ -80,7 +82,7 @@ def deepseek():
     pil_images = load_pil_images(conversation)
     prepare_inputs = vl_chat_processor(
         conversations=conversation,
-        images=pil_images,
+        images=[image],
         force_batchify=True,
         system_prompt=""
     ).to(vl_gpt.device)
@@ -107,7 +109,7 @@ def deepseek():
 @spaces.GPU
 def run_example(image, text_input, model_id="OS-Copilot/OS-Atlas-Base-7B"):
 
-    deepseek()
+    deepseek(image, )
 
 
 def run_example_old(image, text_input, model_id="OS-Copilot/OS-Atlas-Base-7B"):
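
The diff above shows only the changed fragments of app.py. For context, here is a minimal sketch of what the updated deepseek(image) helper plausibly looks like once the visible lines are filled in from the reference inference example that ships with the deepseek_vl2 package (DeepseekVLV2Processor, prepare_inputs_embeds, language.generate). The prompt text, the bfloat16/CUDA placement, and the generation arguments are illustrative assumptions and are not part of this commit.

# Sketch only: reconstructed from the diff fragments plus the deepseek_vl2
# reference example; prompt text and generation settings are assumptions.
import torch
from transformers import AutoModelForCausalLM
from deepseek_vl2.models import DeepseekVLV2Processor

def deepseek(image):
    # specify the path to the model (as in the diff)
    model_path = "deepseek-ai/deepseek-vl2-tiny"
    vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path)
    tokenizer = vl_chat_processor.tokenizer

    vl_gpt = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
    vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

    # Single-image conversation; "<image>" marks where the image is attached.
    # The actual prompt used by the Space is not visible in the diff.
    conversation = [
        {"role": "<|User|>", "content": "<image>\nDescribe this screenshot."},
        {"role": "<|Assistant|>", "content": ""},
    ]

    # The commit passes the PIL image received from the Gradio UI directly
    # (images=[image]) instead of loading paths via load_pil_images(conversation).
    prepare_inputs = vl_chat_processor(
        conversations=conversation,
        images=[image],
        force_batchify=True,
        system_prompt=""
    ).to(vl_gpt.device)

    # Encode the image and generate, following the deepseek_vl2 reference example.
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
    outputs = vl_gpt.language.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,
        do_sample=False,
        use_cache=True,
    )
    return tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)

With this shape, run_example(image, text_input) can forward the image it receives from the UI straight through as deepseek(image); the stray trailing comma in deepseek(image, ) is harmless but could be dropped.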