nroggendorff committed (verified)
Commit c26b20b · 1 Parent(s): b39abab

Update app.py

Files changed (1):
  1. app.py +6 -6
app.py CHANGED

@@ -9,7 +9,7 @@ import requests as rq
 import gc
 from tokenizers import ByteLevelBPETokenizer
 
-dataset = load_dataset("nroggendorff/openhermes", split="train").select(range(int(1e+5)))
+dataset = load_dataset("nroggendorff/openhermes", split="train").select(range(int(1e+4)))
 
 def get_training_corpus():
     for i in range(0, len(dataset), 1000):
@@ -59,10 +59,10 @@ print(tokenizer.apply_chat_template([{"role": "user", "content": "Why is the sky
 
 config = LlamaConfig(
     vocab_size=tokenizer.vocab_size,
-    hidden_size=int(512 / 8),
-    intermediate_size=int(1024 / 8),
-    num_hidden_layers=int(8 / 4),
-    num_attention_heads=int(8 / 4),
+    hidden_size=int(512 / 1),
+    intermediate_size=int(1024 / 1),
+    num_hidden_layers=int(8 / 1),
+    num_attention_heads=int(8 / 1),
     max_position_embeddings=int(512 / 1),
     rms_norm_eps=1e-6,
     initializer_range=0.02,
@@ -97,7 +97,7 @@ print(dataset['text'][2])
 
 args = TrainingArguments(
     output_dir="mayo",
-    num_train_epochs=1,
+    num_train_epochs=2,
     gradient_accumulation_steps=4,
     per_device_train_batch_size=16,
     learning_rate=1e-5,
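Net effect of the commit, for quick reference: the model is scaled up from the earlier divided-down debug dimensions to full size, while the training subset shrinks from 100k to 10k rows and the epoch count doubles. Below is a minimal sketch of what the changed hyperparameters evaluate to once the redundant int(x / 1) arithmetic is folded away; the vocab_size value here is a placeholder assumption (the script passes tokenizer.vocab_size from the freshly trained ByteLevelBPETokenizer), and the fragment is illustrative, not the full app.py.

```python
from transformers import LlamaConfig, TrainingArguments

# Values after this commit, with int(x / 1) simplified to plain literals.
config = LlamaConfig(
    vocab_size=32000,             # assumption; script actually uses tokenizer.vocab_size
    hidden_size=512,              # was int(512 / 8) = 64
    intermediate_size=1024,       # was int(1024 / 8) = 128
    num_hidden_layers=8,          # was int(8 / 4) = 2
    num_attention_heads=8,        # was int(8 / 4) = 2
    max_position_embeddings=512,  # unchanged
    rms_norm_eps=1e-6,
    initializer_range=0.02,
)

args = TrainingArguments(
    output_dir="mayo",
    num_train_epochs=2,           # was 1
    gradient_accumulation_steps=4,
    per_device_train_batch_size=16,
    learning_rate=1e-5,
)
```

Back-of-envelope, each Llama block now carries roughly 4·512² attention weights plus 3·512·1024 gated-MLP weights, about 2.6M parameters, so eight layers give roughly 21M non-embedding parameters versus about 0.08M before (two layers of ~41k each), a ~250x larger transformer stack. Meanwhile the data seen per run drops from 100k rows × 1 epoch to 10k rows × 2 epochs, about 5x fewer examples overall, assuming rows map one-to-one to training samples.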