prithivMLmods committed on
Commit 6a6b031 · verified · 1 Parent(s): 208e273

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -17,8 +17,8 @@ from transformers import (
 from qwen_vl_utils import process_vision_info
 
 # Constants for text generation
-MAX_MAX_NEW_TOKENS = 4096
-DEFAULT_MAX_NEW_TOKENS = 3584
+MAX_MAX_NEW_TOKENS = 16384
+DEFAULT_MAX_NEW_TOKENS = 8192
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
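
For context, constants like these are commonly used in Gradio demos of this kind to bound a "max new tokens" slider and to truncate over-long prompts before generation. The sketch below illustrates that pattern under those assumptions; the clamp_inputs helper and the slider wiring are hypothetical and not taken from this app's actual code.

# Illustrative sketch only: assumes a Gradio app structured like typical
# vision-language demos; the helper and component names are hypothetical.
import os

import gradio as gr
import torch

MAX_MAX_NEW_TOKENS = 16384        # upper bound exposed in the UI
DEFAULT_MAX_NEW_TOKENS = 8192     # value the slider starts at
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))


def clamp_inputs(input_ids: torch.Tensor) -> torch.Tensor:
    # Keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens so the
    # prompt stays within the input budget before generation.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    return input_ids


# A slider bounded by the constants above; raising MAX_MAX_NEW_TOKENS to
# 16384 lets users request much longer generations from the UI.
max_new_tokens = gr.Slider(
    label="Max new tokens",
    minimum=1,
    maximum=MAX_MAX_NEW_TOKENS,
    step=1,
    value=DEFAULT_MAX_NEW_TOKENS,
)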