Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -7,6 +7,11 @@ from transformers import MllamaForConditionalGeneration, AutoProcessor
 # from huggingface_hub import login
 # login("HUGGINGFACE_API_TOKEN")
 
+# ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+# model = MllamaForConditionalGeneration.from_pretrained(ckpt,
+#     torch_dtype=torch.bfloat16).to("cuda")
+# processor = AutoProcessor.from_pretrained(ckpt)
+
 # Load the Llama 3.2 Vision Model
 def load_llama_model():
     model_id = "meta-llama/Llama-3.2-11B-Vision"
@@ -15,7 +20,7 @@ def load_llama_model():
     model = MllamaForConditionalGeneration.from_pretrained(
         model_id,
         torch_dtype=torch.bfloat16,
-        device_map="auto",
+        # device_map="auto",
     )
     processor = AutoProcessor.from_pretrained(model_id)
 
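For context: this Space runs on ZeroGPU ("Running on Zero"), where a GPU is attached only while a function decorated with @spaces.GPU is executing, which is the likely reason device_map="auto" is commented out in this commit. Below is a minimal sketch of the usual ZeroGPU loading pattern, assuming the `spaces` package available on Hugging Face Spaces; the `generate` function, its parameters, and the max_new_tokens value are illustrative and not part of this commit.

import torch
import spaces
from transformers import MllamaForConditionalGeneration, AutoProcessor

model_id = "meta-llama/Llama-3.2-11B-Vision"

# Load once at startup. On ZeroGPU there is no GPU at import time, so the
# model is kept on CPU here rather than placed with device_map="auto".
model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
)
processor = AutoProcessor.from_pretrained(model_id)

@spaces.GPU  # a GPU is allocated only for the duration of this call
def generate(image, prompt):
    # Illustrative inference step: move inputs and model to the attached GPU.
    inputs = processor(image, prompt, return_tensors="pt").to("cuda")
    output = model.to("cuda").generate(**inputs, max_new_tokens=100)
    return processor.decode(output[0], skip_special_tokens=True)

The commented-out alternative in the diff instead loads the Instruct checkpoint with .to("cuda") at module scope; ZeroGPU is designed to tolerate that by deferring actual device placement until a GPU is attached, so both variants address the same constraint.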