prithivMLmods committed on
Commit db8e9a9 · verified · 1 Parent(s): a9159a6

Update app.py

Files changed (1):
  1. app.py +1 -1
app.py CHANGED
@@ -18,7 +18,7 @@ from qwen_vl_utils import process_vision_info
 
 # Constants for text generation
 MAX_MAX_NEW_TOKENS = 4096
-DEFAULT_MAX_NEW_TOKENS = 2099
+DEFAULT_MAX_NEW_TOKENS = 2048
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
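For context, constants like these are usually wired into a generation-length control and an input-length guard in this kind of Gradio demo. The sketch below is a minimal illustration of that wiring under those assumptions; the gr.Slider control and the truncate_inputs helper are hypothetical and not taken from the actual app.py.

import os
import torch
import gradio as gr

# Constants from the hunk above.
MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 2048   # value set by this commit (was 2099)
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def truncate_inputs(input_ids: torch.Tensor) -> torch.Tensor:
    # Illustrative guard (assumed pattern): keep only the last
    # MAX_INPUT_TOKEN_LENGTH tokens so the prompt stays within budget.
    if input_ids.shape[-1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    return input_ids

with gr.Blocks() as demo:
    # Hypothetical UI control: the slider starts at DEFAULT_MAX_NEW_TOKENS
    # and is capped at MAX_MAX_NEW_TOKENS; 2048 is a more conventional
    # default than the earlier 2099.
    max_new_tokens = gr.Slider(
        minimum=1,
        maximum=MAX_MAX_NEW_TOKENS,
        step=1,
        value=DEFAULT_MAX_NEW_TOKENS,
        label="Max new tokens",
    )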