nroggendorff committed
Commit e728d3c · verified · 1 Parent(s): db11568

Update train.py

Files changed (1): train.py +5 -5
train.py CHANGED
@@ -7,10 +7,10 @@ from transformers import AutoTokenizer, LlamaConfig, LlamaForCausalLM, TrainingA
 from datasets import load_dataset, DatasetDict, Dataset
 from tokenizers import ByteLevelBPETokenizer
 
-BATCH_SIZE = 96
-EPOCHS = 10
+BATCH_SIZE = 4
+EPOCHS = 2
 LEARNING_RATE = 2e-4
-FACTOR = 512
+FACTOR = 22 * 69
 MAX_SEQ_LENGTH = 128
 VOCAB_SIZE = 32000
 INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
@@ -24,9 +24,9 @@ PUSH_TO_HUB = True
 
 def load_data():
     pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
-    pretrain = Dataset.from_generator(lambda: pretrain.take(int(3e+5)))
+    pretrain = Dataset.from_generator(lambda: pretrain.take(int(3e+4)))
     instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
-    instruct = Dataset.from_generator(lambda: instruct.take(int(5e+5)))
+    instruct = Dataset.from_generator(lambda: instruct.take(int(5e+4)))
     dataset_dict = DatasetDict({
         'pretrain': pretrain,
         'instruct': instruct
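
For context on what this commit does: load_data() streams both corpora and then materializes a fixed-size slice, so the second hunk cuts the materialized data tenfold (3e+5 → 3e+4 pretrain rows, i.e. 300,000 → 30,000; 5e+5 → 5e+4 instruct rows, i.e. 500,000 → 50,000). Below is a minimal, self-contained sketch of the new code path, assuming only the datasets library; the INSTRUCT_DATASET value is not visible in this diff, so the name used here is a placeholder, and the return statement is an assumption since the hunk is truncated after the DatasetDict literal.

from datasets import Dataset, DatasetDict, load_dataset

INPUT_DATASET = "HuggingFaceTB/smollm-corpus"
INSTRUCT_DATASET = "example-org/instruct-data"  # placeholder; real value defined elsewhere in train.py

def load_data():
    # streaming=True yields an IterableDataset, so nothing is downloaded up front.
    pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
    # take(n) caps the stream at n rows; Dataset.from_generator consumes those rows
    # eagerly and materializes them as a regular map-style Dataset.
    pretrain = Dataset.from_generator(lambda: pretrain.take(int(3e+4)))  # 30,000 rows

    instruct = load_dataset(INSTRUCT_DATASET, split="train", streaming=True)
    instruct = Dataset.from_generator(lambda: instruct.take(int(5e+4)))  # 50,000 rows

    return DatasetDict({"pretrain": pretrain, "instruct": instruct})  # return is assumed; diff cuts off here

The first hunk is plain arithmetic: BATCH_SIZE drops from 96 to 4, EPOCHS from 10 to 2, and the new FACTOR expression 22 * 69 evaluates to 1518 (up from 512).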