app.py
CHANGED
@@ -10,8 +10,8 @@ from datasets import load_dataset
 
 huggingface_hub.login(os.getenv('HF_TOKEN'))
 #peft_model_id = "debisoft/DeepSeek-R1-Distill-Qwen-7B-thinking-function_calling-quant-V0"
-
-peft_model_id = "debisoft/Qwen2.5-VL-3B-Instruct-thinking-function_calling-V0"
+peft_model_id = "debisoft/Qwen2.5-VL-7B-Instruct-thinking-function_calling-quant-V0"
+#peft_model_id = "debisoft/Qwen2.5-VL-3B-Instruct-thinking-function_calling-V0"
 
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
@@ -52,7 +52,7 @@ def sentience_check():
 
     with torch.no_grad():
         outputs = peft_model.generate(
-            **inputs, max_new_tokens=
+            **inputs, max_new_tokens=512, pad_token_id = tokenizer.eos_token_id
         )
 
     #peft_model.to(cpu_device)
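For context, a minimal sketch of how the changed pieces plausibly fit together once this commit is applied: the quantized 7B adapter id, the 4-bit BitsAndBytesConfig, and the generate() call with pad_token_id set to the EOS id (which suppresses the transformers warning about a missing pad token during open-ended generation). The base checkpoint (Qwen/Qwen2.5-VL-7B-Instruct), the PeftModel attachment, the NF4 compute settings, and the example prompt are assumptions for illustration, not taken from app.py.

import os

import torch
import huggingface_hub
from transformers import AutoTokenizer, BitsAndBytesConfig, Qwen2_5_VLForConditionalGeneration
from peft import PeftModel

huggingface_hub.login(os.getenv("HF_TOKEN"))

peft_model_id = "debisoft/Qwen2.5-VL-7B-Instruct-thinking-function_calling-quant-V0"
base_model_id = "Qwen/Qwen2.5-VL-7B-Instruct"  # assumed base checkpoint for the adapter

# 4-bit NF4 quantization so the 7B model fits in a small GPU; exact settings are assumed.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the quantized base model and attach the fine-tuned PEFT adapter on top of it.
base_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    base_model_id, quantization_config=bnb_config, device_map="auto"
)
peft_model = PeftModel.from_pretrained(base_model, peft_model_id)

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
prompt = "Are you sentient?"  # placeholder prompt, not from app.py
inputs = tokenizer(prompt, return_tensors="pt").to(peft_model.device)

with torch.no_grad():
    outputs = peft_model.generate(
        # pad_token_id=eos_token_id avoids the "no pad token set" warning
        **inputs, max_new_tokens=512, pad_token_id=tokenizer.eos_token_id
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))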