nroggendorff committed
Commit 645d0c6 · verified · 1 Parent(s): dfdce33

Update train.py

Files changed (1):
  1. train.py +5 -5
train.py CHANGED

@@ -7,12 +7,12 @@ from transformers import AutoTokenizer, LlamaConfig, AutoModelForCausalLM, Llama
 from datasets import load_dataset, Dataset
 from tokenizers import ByteLevelBPETokenizer
 
-BATCH_SIZE = 16
-EPOCHS = 1
-LEARNING_RATE = 2e-4
+BATCH_SIZE = 32
+EPOCHS = 2
+LEARNING_RATE = 3e-5
 FACTOR = 22 * 20
 MAX_SEQ_LENGTH = 512
-VOCAB_SIZE = 32000
+VOCAB_SIZE = 52000
 INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
 INSTRUCT_DATASET = "nroggendorff/elephant"
 OUTPUT_REPO = "nroggendorff/smallama"
@@ -21,7 +21,7 @@ INIT = 0#/2
 SHARD_SIZE = int(5e+6)
 FP16 = True
 WARMUP_STEPS = 0
-DECAY = 0
+DECAY = 1e-3
 GRADIENT_ACCUMULATION_STEPS = 1
 PUSH_TO_HUB = True
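
For context, the constants touched by this commit are the kind of module-level hyperparameters that typically get passed to a tokenizer trainer, a model config, and a Hugging Face TrainingArguments object further down in train.py. The rest of the script is not part of this diff, so the wiring below is only a minimal sketch under that assumption; the actual variable use and argument mapping in the repository may differ.

# Minimal sketch (assumptions, not the repository's code) of how the edited
# constants are commonly consumed when training a small Llama-style model.
from transformers import LlamaConfig, TrainingArguments
from tokenizers import ByteLevelBPETokenizer

BATCH_SIZE = 32          # was 16
EPOCHS = 2               # was 1
LEARNING_RATE = 3e-5     # was 2e-4
VOCAB_SIZE = 52000       # was 32000
MAX_SEQ_LENGTH = 512
DECAY = 1e-3             # was 0; interpreted here as weight decay
WARMUP_STEPS = 0
FP16 = True
GRADIENT_ACCUMULATION_STEPS = 1
OUTPUT_REPO = "nroggendorff/smallama"
PUSH_TO_HUB = True

# VOCAB_SIZE would normally bound the BPE vocabulary learned by the tokenizer
# and must match the embedding size declared in the model config.
bpe = ByteLevelBPETokenizer()
bpe.train_from_iterator(
    ["example training text"],               # placeholder corpus iterator
    vocab_size=VOCAB_SIZE,
    special_tokens=["<s>", "</s>", "<pad>", "<unk>"],
)

config = LlamaConfig(
    vocab_size=VOCAB_SIZE,
    max_position_embeddings=MAX_SEQ_LENGTH,
)

# The optimizer and schedule knobs map naturally onto TrainingArguments fields.
args = TrainingArguments(
    output_dir="output",                      # hypothetical local directory
    per_device_train_batch_size=BATCH_SIZE,
    num_train_epochs=EPOCHS,
    learning_rate=LEARNING_RATE,
    weight_decay=DECAY,
    warmup_steps=WARMUP_STEPS,
    fp16=FP16,
    gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
    push_to_hub=PUSH_TO_HUB,
    hub_model_id=OUTPUT_REPO,
)

Read this way, the commit doubles the per-device batch size and epoch count, lowers the learning rate by roughly an order of magnitude, enlarges the tokenizer vocabulary, and turns on a small weight decay, while leaving sequence length, warmup, and mixed-precision settings unchanged.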