nroggendorff committed on
Commit
e2cf6d7
·
verified ·
1 Parent(s): eaa328b

Update train.py

Browse files
Files changed (1) hide show
  1. train.py +4 -4
train.py CHANGED
@@ -7,14 +7,14 @@ from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingA
7
  from datasets import load_dataset, DatasetDict, Dataset
8
  from tokenizers import ByteLevelBPETokenizer
9
 
10
- MAX_SEQ_LENGTH = 128
11
  BATCH_SIZE = 128
12
  EPOCHS = 3
13
  LEARNING_RATE = 2.5e-5
14
  FACTOR = 256
15
  VOCAB_SIZE = 32000
16
  INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
17
- INSTRUCT_DATASET = "nroggendorff/elephant"
18
  OUTPUT_REPO = "smallama"
19
  FP16 = True
20
  WARMUP_STEPS = 0
@@ -25,9 +25,9 @@ PUSH_TO_HUB = True
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
- pretrain = Dataset.from_generator(lambda: pretrain.take(int(2.5e+6)))
29
  instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
30
- instruct = Dataset.from_generator(lambda: instruct.take(int(3e+6)))
31
  dataset_dict = DatasetDict({
32
  'pretrain': pretrain,
33
  'instruct': instruct
 
7
  from datasets import load_dataset, DatasetDict, Dataset
8
  from tokenizers import ByteLevelBPETokenizer
9
 
10
+ MAX_SEQ_LENGTH = 512
11
  BATCH_SIZE = 128
12
  EPOCHS = 3
13
  LEARNING_RATE = 2.5e-5
14
  FACTOR = 256
15
  VOCAB_SIZE = 32000
16
  INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
17
+ INSTRUCT_DATASET = "nroggendorff/openhermes"
18
  OUTPUT_REPO = "smallama"
19
  FP16 = True
20
  WARMUP_STEPS = 0
 
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
+ pretrain = Dataset.from_generator(lambda: pretrain.take(int(2.5e+4)))
29
  instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
30
+ instruct = Dataset.from_generator(lambda: instruct.take(int(5e+4)))
31
  dataset_dict = DatasetDict({
32
  'pretrain': pretrain,
33
  'instruct': instruct