LLMproj1 committed on
Commit
783a29f
·
verified ·
1 Parent(s): 81aaffe

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -21,7 +21,8 @@ import os
21
 
22
 
23
  # Define the parameters for the model
24
- max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 
25
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
26
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
27
 
 
21
 
22
 
23
  # Define the parameters for the model
24
+ max_seq_length = 2048
25
+ # Choose any! We auto support RoPE Scaling internally!
26
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
27
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
28