nroggendorff committed on
Commit
24db8b5
·
verified ·
1 Parent(s): bdf0ab5

Update train.py

Browse files
Files changed (1) hide show
  1. train.py +7 -7
train.py CHANGED
@@ -7,26 +7,26 @@ from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingA
7
  from datasets import load_dataset, DatasetDict, Dataset
8
  from tokenizers import ByteLevelBPETokenizer
9
 
10
- MAX_SEQ_LENGTH = 512
11
- BATCH_SIZE = 128
12
  EPOCHS = 2
13
- LEARNING_RATE = 2e-2
14
  FACTOR = 256
15
  VOCAB_SIZE = 32000
16
  INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
17
  INSTRUCT_DATASET = "nroggendorff/openhermes"
18
  OUTPUT_REPO = "smallama"
19
- FP16 = True
20
  WARMUP_STEPS = 20
21
- DECAY = 0.01
22
  GRADIENT_ACCUMULATION_STEPS = 1
23
  CLIPPING = 1.0
24
  PUSH_TO_HUB = True
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
- pretrain = Dataset.from_generator(lambda: pretrain.take(200000))
29
- instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(200000))
30
  dataset_dict = DatasetDict({
31
  'pretrain': pretrain,
32
  'instruct': instruct
 
7
  from datasets import load_dataset, DatasetDict, Dataset
8
  from tokenizers import ByteLevelBPETokenizer
9
 
10
+ MAX_SEQ_LENGTH = 128
11
+ BATCH_SIZE = 256
12
  EPOCHS = 2
13
+ LEARNING_RATE = 2e-3
14
  FACTOR = 256
15
  VOCAB_SIZE = 32000
16
  INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
17
  INSTRUCT_DATASET = "nroggendorff/openhermes"
18
  OUTPUT_REPO = "smallama"
19
+ FP16 = False
20
  WARMUP_STEPS = 20
21
+ DECAY = 1e-3
22
  GRADIENT_ACCUMULATION_STEPS = 1
23
  CLIPPING = 1.0
24
  PUSH_TO_HUB = True
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
+ pretrain = Dataset.from_generator(lambda: pretrain.take(int(2e+6)))
29
+ instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(int(2e+5)))
30
  dataset_dict = DatasetDict({
31
  'pretrain': pretrain,
32
  'instruct': instruct