Update app.py
app.py CHANGED
@@ -32,19 +32,19 @@ import torch
 
 
 
-# Create a configuration for quantization
-quantization_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_compute_dtype="float16",
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4",
-)
+# # Create a configuration for quantization
+# quantization_config = BitsAndBytesConfig(
+#     load_in_4bit=True,
+#     bnb_4bit_compute_dtype="float16",
+#     bnb_4bit_use_double_quant=True,
+#     bnb_4bit_quant_type="nf4",
+# )
 
 # Load the model and processor
 model = Qwen2VLForConditionalGeneration.from_pretrained(
     "Qwen/QVQ-72B-Preview", device_map="auto",
     quantization_config=quantization_config,
-    offload_folder="offload",
+    # offload_folder="offload",
 )
 processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview")
 
@@ -58,7 +58,8 @@ def process_image_and_question(image, question):
         {
             "role": "system",
             "content": [
-                {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
+                # {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
+                {"type": "text", "text": "You are helpful assistant, you give answer in JSON"}
             ],
         },
         {