prithivMLmods committed
Commit 9e5e7f6 · verified · 1 Parent(s): 447413e

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -18,7 +18,7 @@ from qwen_vl_utils import process_vision_info
 
 # Constants for text generation
 MAX_MAX_NEW_TOKENS = 4096
-DEFAULT_MAX_NEW_TOKENS = 3000
+DEFAULT_MAX_NEW_TOKENS = 3584
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
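The commit only raises the default generation budget from 3000 to 3584 new tokens, keeping the hard ceiling at 4096. Below is a minimal sketch of how constants like these are typically wired into a transformers generate() call; the generate_reply helper and the idea of passing the model and tokenizer as arguments are assumptions for illustration, not code taken from this Space.

import os
import torch

# Constants as they read after this commit.
MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 3584
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def generate_reply(model, tokenizer, prompt, max_new_tokens=DEFAULT_MAX_NEW_TOKENS):
    # Hypothetical helper: cap the requested budget at the hard ceiling.
    max_new_tokens = min(max_new_tokens, MAX_MAX_NEW_TOKENS)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    # Trim overly long prompts to the configured input limit.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(device)
    output_ids = model.generate(input_ids, max_new_tokens=max_new_tokens)
    # Return only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)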