Spaces: Running on Zero
Commit 9a0f0f5 · "debug"
Parent(s): 1288171
app.py CHANGED
@@ -38,8 +38,8 @@ tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_PATH)
 mllm = Qwen2_5_VLForConditionalGeneration.from_pretrained(MLLM_MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")
 llm = Qwen2ForCausalLM.from_pretrained(LLM_MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")

-mllm_sampling = dict(temperature=0,
-llm_sampling = dict(temperature=0.6, top_p=0.95,
+mllm_sampling = dict(temperature=0, max_new_tokens=8192)
+llm_sampling = dict(temperature=0.6, top_p=0.95, max_new_tokens=8192)

 # === Build Prompts ===
 def build_messages(image_path, question):
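For context, the two sampling dicts are presumably forwarded as generation kwargs to the models' generate() calls later in app.py; that wiring is not part of this hunk, so the sketch below is an assumption. The model path and prompt are placeholders, not values from the Space.

# Minimal sketch, assuming llm_sampling is unpacked into generate();
# the actual call site is not shown in this commit.
import torch
from transformers import AutoTokenizer, Qwen2ForCausalLM

LLM_MODEL_PATH = "Qwen/Qwen2-7B-Instruct"  # placeholder; the real path is set earlier in app.py

tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_PATH)
llm = Qwen2ForCausalLM.from_pretrained(LLM_MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")

# After this commit the dict is a complete call and caps generation at 8192 new tokens.
llm_sampling = dict(temperature=0.6, top_p=0.95, max_new_tokens=8192)

inputs = tokenizer("Describe the image in one sentence.", return_tensors="pt").to(llm.device)
out = llm.generate(**inputs, do_sample=True, **llm_sampling)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))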