Commit 5cdfef0
Parent(s): 32d3d67
test input
app.py CHANGED
@@ -18,14 +18,14 @@ from transformers import AutoModelForCausalLM
 
 models = {
     #"OS-Copilot/OS-Atlas-Base-7B": Qwen2VLForConditionalGeneration.from_pretrained("OS-Copilot/OS-Atlas-Base-7B", torch_dtype="auto", device_map="auto"),
-    "deepseek-ai/deepseek-vl2-tiny": AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-vl2-tiny", trust_remote_code=True),
+    #"deepseek-ai/deepseek-vl2-tiny": AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-vl2-tiny", trust_remote_code=True),
     "deepseek-ai/deepseek-vl2-small": AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-vl2-small", trust_remote_code=True),
     #"deepseek-ai/deepseek-vl2": AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-vl2", trust_remote_code=True)
 }
 
 processors = {
     #"OS-Copilot/OS-Atlas-Base-7B": AutoProcessor.from_pretrained("OS-Copilot/OS-Atlas-Base-7B")
-    "deepseek-ai/deepseek-vl2-tiny": DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2-tiny",),
+    #"deepseek-ai/deepseek-vl2-tiny": DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2-tiny",),
     "deepseek-ai/deepseek-vl2-small": DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2-small",),
     #"deepseek-ai/deepseek-vl2": DeepseekVLV2Processor.from_pretrained("deepseek-ai/deepseek-vl2",),
 }
@@ -62,14 +62,13 @@ def rescale_bounding_boxes(bounding_boxes, original_width, original_height, scal
     return rescaled_boxes
 
 
-def deepseek(image, text_input):
+def deepseek(image, text_input, model_id):
     print("helloe!!!!")
     # specify the path to the model
-
-    vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
+    vl_chat_processor: DeepseekVLV2Processor = processors[model_id]
     tokenizer = vl_chat_processor.tokenizer
 
-    vl_gpt: DeepseekVLV2ForCausalLM =
+    vl_gpt: DeepseekVLV2ForCausalLM = models[model_id]
     vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
 
     ## single image conversation example
@@ -123,7 +122,7 @@ def deepseek(image, text_input):
 @spaces.GPU
 def run_example(image, text_input, model_id="OS-Copilot/OS-Atlas-Base-7B"):
 
-    return deepseek(image, text_input)
+    return deepseek(image, text_input, model_id)
 
 
 def run_example_old(image, text_input, model_id="OS-Copilot/OS-Atlas-Base-7B"):
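In plainer terms, the commit routes deepseek() through the module-level models/processors dicts keyed by model_id (replacing the per-call DeepseekVLV2Processor.from_pretrained(model_path) and the dangling vl_gpt assignment), comments out the vl2-tiny entries so only vl2-small is preloaded, and has run_example() forward model_id. The sketch below illustrates that lookup pattern in isolation; it is a minimal approximation of the pattern, not the Space's full app.py. The deepseek_vl2 import path and the CPU fallback are assumptions, and the return value exists only to keep the sketch self-contained.

# Minimal sketch of the registry pattern this commit moves to: models and
# processors are created once in module-level dicts keyed by model id, and
# deepseek() selects from them by model_id instead of calling from_pretrained()
# on every request.
import torch
from transformers import AutoModelForCausalLM
from deepseek_vl2.models import DeepseekVLV2ForCausalLM, DeepseekVLV2Processor  # assumed package layout

MODEL_ID = "deepseek-ai/deepseek-vl2-small"

# Loaded once at import time, mirroring the Space's module-level dicts.
models = {
    MODEL_ID: AutoModelForCausalLM.from_pretrained(MODEL_ID, trust_remote_code=True),
}
processors = {
    MODEL_ID: DeepseekVLV2Processor.from_pretrained(MODEL_ID),
}


def deepseek(image, text_input, model_id=MODEL_ID):
    # Look the preloaded objects up by id rather than reloading them here.
    vl_chat_processor: DeepseekVLV2Processor = processors[model_id]
    tokenizer = vl_chat_processor.tokenizer

    vl_gpt: DeepseekVLV2ForCausalLM = models[model_id]
    # Assumption: the Space runs under @spaces.GPU and calls .cuda() directly;
    # this sketch falls back to CPU so it can run anywhere.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    vl_gpt = vl_gpt.to(torch.bfloat16).to(device).eval()

    # The rest of the Space's pipeline (building the conversation, preparing
    # inputs with vl_chat_processor, and generating) is unchanged and omitted.
    return vl_chat_processor, tokenizer, vl_gpt


def run_example(image, text_input, model_id=MODEL_ID):
    # Forward model_id, matching the updated run_example() in the diff.
    return deepseek(image, text_input, model_id)

The point of the registry is that the expensive from_pretrained() calls run once at Space startup rather than inside every @spaces.GPU request; run_example() then only has to pass the chosen model_id through.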