nroggendorff committed on
Commit
198dd52
·
verified ·
1 Parent(s): 1983b74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -10,7 +10,7 @@ from datasets import load_dataset
10
  from tokenizers import ByteLevelBPETokenizer
11
  import trl
12
 
13
- dataset = load_dataset("nroggendorff/openhermes", split="train").select(range(int(4e+4)))
14
 
15
  def get_training_corpus():
16
  for i in range(0, len(dataset), 1000):
@@ -60,11 +60,11 @@ print(tokenizer.apply_chat_template([{"role": "user", "content": "Why is the sky
60
 
61
  config = LlamaConfig(
62
  vocab_size=tokenizer.vocab_size,
63
- hidden_size=512,
64
- intermediate_size=1024,
65
- num_hidden_layers=8,
66
- num_attention_heads=8,
67
- max_position_embeddings=512,
68
  rms_norm_eps=1e-6,
69
  initializer_range=0.02,
70
  use_cache=True,
@@ -98,8 +98,8 @@ print(dataset['text'][2])
98
 
99
  args = TrainingArguments(
100
  output_dir="mayo",
101
- num_train_epochs=16,
102
- per_device_train_batch_size=64,
103
  gradient_accumulation_steps=4,
104
  learning_rate=1e-5,
105
  save_steps=100000,
 
10
  from tokenizers import ByteLevelBPETokenizer
11
  import trl
12
 
13
+ dataset = load_dataset("nroggendorff/openhermes", split="train").select(range(int(2e+4)))
14
 
15
  def get_training_corpus():
16
  for i in range(0, len(dataset), 1000):
 
60
 
61
  config = LlamaConfig(
62
  vocab_size=tokenizer.vocab_size,
63
+ hidden_size=512 * 4,
64
+ intermediate_size=1024 * 4,
65
+ num_hidden_layers=8 * 4,
66
+ num_attention_heads=8 * 4,
67
+ max_position_embeddings=512 * 4,
68
  rms_norm_eps=1e-6,
69
  initializer_range=0.02,
70
  use_cache=True,
 
98
 
99
  args = TrainingArguments(
100
  output_dir="mayo",
101
+ num_train_epochs=2,
102
+ per_device_train_batch_size=16,
103
  gradient_accumulation_steps=4,
104
  learning_rate=1e-5,
105
  save_steps=100000,