TimurHromek committed on
Commit 95d187a · 1 Parent(s): dd6c81d

Uploaded the HROM model and training code, Gradio chat demo, tokenizer, and Apache-2.0 license.
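
A minimal sketch of how the uploaded pieces fit together (assuming torch, datasets, tokenizers, and gradio are installed; paths follow the defaults in HROM_Trainer.py):

# Hypothetical end-to-end usage, not a tested recipe.
# 1) Train the BPE tokenizer (if it is missing) and the HROM model;
#    this writes tokenizer/hrom_tokenizer.json and rolling checkpoints under checkpoints/.
from HROM_Trainer import train
train()

# 2) Then launch the Gradio chat demo, which loads the newest checkpoint
#    from checkpoints/ together with the saved tokenizer:
#    python app.py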

Files changed (4)
  1. HROM_Trainer.py +360 -0
  2. LICENSE +201 -0
  3. app.py +104 -0
  4. tokenizer/hrom_tokenizer.json +0 -0
HROM_Trainer.py ADDED
@@ -0,0 +1,360 @@
+ import torch
+ import torch.nn as nn
+ from torch.utils.data import Dataset, DataLoader
+ from datasets import load_dataset
+ from tokenizers import Tokenizer, models, trainers, pre_tokenizers, processors, decoders
+ import math
+ import os
+ import re
+ from datetime import datetime
+ from contextlib import nullcontext
+
+ # Configuration
+ CONFIG = {
+     "dim": 512,
+     "n_layers": 6,
+     "n_heads": 8,
+     "ff_dim": 2048,
+     "dropout": 0.1,
+     "max_seq_len": 1024,
+     "batch_size": 32,
+     "checkpoint_interval": 1000,
+     "debug_interval": 500,
+     "dataset": "daily_dialog",
+     "vocab_size": 32000,
+     "tokenizer_train_samples": 100000,
+     "learning_rate": 3e-4,
+     "max_turns": 6,
+     "max_checkpoints": 5,
+     "num_epochs": 50  # Increased number of epochs for longer training
+ }
+
+ class RotaryEmbedding(nn.Module):
+     def __init__(self, dim):
+         super().__init__()
+         inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
+         self.register_buffer("inv_freq", inv_freq)
+
+     def forward(self, seq_len):
+         t = torch.arange(seq_len, device=self.inv_freq.device).type_as(self.inv_freq)
+         freqs = torch.einsum("i, j -> i j", t, self.inv_freq)
+         return torch.cat((freqs, freqs), dim=-1)
+
+ def rotate_half(x):
+     x1, x2 = x.chunk(2, dim=-1)
+     return torch.cat((-x2, x1), dim=-1)
+
+ def apply_rotary_pos_emb(pos, t):
+     pos = pos.unsqueeze(0).unsqueeze(1)
+     return (t * pos.cos()) + (rotate_half(t) * pos.sin())
+
+ class SwiGLU(nn.Module):
+     def forward(self, x):
+         x, gate = x.chunk(2, dim=-1)
+         return x * torch.sigmoid(gate)
+
+ class HROMAttention(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.dim = CONFIG["dim"]
+         self.n_heads = CONFIG["n_heads"]
+         self.head_dim = self.dim // self.n_heads
+         self.qkv = nn.Linear(self.dim, 3 * self.dim)
+         self.proj = nn.Linear(self.dim, self.dim)
+         self.rotary = RotaryEmbedding(self.head_dim)
+         self.dropout = nn.Dropout(CONFIG["dropout"])
+
+     def forward(self, x, mask=None):
+         B, T, _ = x.shape
+         qkv = self.qkv(x).reshape(B, T, 3, self.n_heads, self.head_dim)
+         q, k, v = qkv.unbind(2)
+         q = q.transpose(1, 2)
+         k = k.transpose(1, 2)
+         v = v.transpose(1, 2)
+         pos = self.rotary(T)
+         q = apply_rotary_pos_emb(pos, q)
+         k = apply_rotary_pos_emb(pos, k)
+         attn = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(self.head_dim))
+         if mask is not None:
+             mask = mask.unsqueeze(1)
+             attn = attn + mask
+         attn = torch.softmax(attn, dim=-1)
+         attn = self.dropout(attn)
+         out = attn @ v
+         out = out.transpose(1, 2).reshape(B, T, self.dim)
+         return self.proj(out)
+
+ class HROMBlock(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.attn = HROMAttention()
+         self.ff = nn.Sequential(
+             nn.Linear(CONFIG["dim"], 2 * CONFIG["ff_dim"]),
+             SwiGLU(),
+             nn.Linear(CONFIG["ff_dim"], CONFIG["dim"])
+         )
+         self.norm1 = nn.LayerNorm(CONFIG["dim"])
+         self.norm2 = nn.LayerNorm(CONFIG["dim"])
+         self.dropout = nn.Dropout(CONFIG["dropout"])
+
+     def forward(self, x, mask=None):
+         x = x + self.dropout(self.attn(self.norm1(x), mask))
+         x = x + self.dropout(self.ff(self.norm2(x)))
+         return x
+
+ class HROM(nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.embed = nn.Embedding(CONFIG["vocab_size"], CONFIG["dim"])
+         self.blocks = nn.ModuleList([HROMBlock() for _ in range(CONFIG["n_layers"])])
+         self.norm = nn.LayerNorm(CONFIG["dim"])
+         self.head = nn.Linear(CONFIG["dim"], CONFIG["vocab_size"])
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+
+     def forward(self, x, attention_mask=None):
+         x = self.embed(x)
+         if attention_mask is not None:
+             B, T = attention_mask.shape
+             causal_mask = torch.triu(torch.ones(T, T) * float('-inf'), diagonal=1)
+             causal_mask = causal_mask.to(x.device)
+             pad_mask = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype=torch.float32)
+             pad_mask = (1.0 - pad_mask) * torch.finfo(torch.float32).min
+             mask = causal_mask + pad_mask.squeeze(1)
+         else:
+             B, T = x.shape[:2]
+             mask = torch.triu(torch.ones(T, T) * float('-inf'), diagonal=1)
+             mask = mask.to(x.device)
+             mask = mask.unsqueeze(0).expand(B, -1, -1)
+         for block in self.blocks:
+             x = block(x, mask)
+         return self.head(self.norm(x))
+
+ class TokenizerTrainer:
+     def __init__(self):
+         self.tokenizer = Tokenizer(models.BPE())
+         self.tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
+         self.tokenizer.decoder = decoders.ByteLevel()
+         self.special_tokens = ["<pad>", "<s>", "</s>", "<unk>", "<user>", "<assistant>"]
+
+     def train(self, dataset_name):
+         dataset = load_dataset(dataset_name, split=f"train[:{CONFIG['tokenizer_train_samples']}]")
+         text_samples = []
+         for entry in dataset:
+             if "dialog" in entry:
+                 for i, utterance in enumerate(entry["dialog"][:CONFIG["max_turns"]]):
+                     role = "<user>" if i % 2 == 0 else "<assistant>"
+                     text_samples.append(f"{role} {utterance}")
+             else:
+                 text_samples.append(self._clean_text(entry.get("text", "")))
+         trainer = trainers.BpeTrainer(
+             vocab_size=CONFIG["vocab_size"],
+             special_tokens=self.special_tokens,
+             min_frequency=2,
+             show_progress=True
+         )
+         self.tokenizer.train_from_iterator(text_samples, trainer=trainer, length=len(text_samples))
+         self.tokenizer.post_processor = processors.TemplateProcessing(
+             single="$A </s>",
+             pair="$A $B </s>",
+             special_tokens=[("</s>", self.tokenizer.token_to_id("</s>"))],
+         )
+         os.makedirs("tokenizer", exist_ok=True)
+         self.tokenizer.save("tokenizer/hrom_tokenizer.json")
+
+     def _clean_text(self, text):
+         text = re.sub(r'[^\w\s.,!?\'\-:;<>]', '', text)
+         text = re.sub(r'\s+', ' ', text).strip()
+         return text
+
+ class ChatDataset(Dataset):
+     def __init__(self, tokenizer):
+         full_dataset = load_dataset(CONFIG["dataset"], split="train")
+         num_samples = min(len(full_dataset), CONFIG["tokenizer_train_samples"])
+         self.dataset = full_dataset.shuffle(seed=42).select(range(num_samples))
+         self.tokenizer = tokenizer
+         self.max_length = CONFIG["max_seq_len"]
+         self.turn_sep = self.tokenizer.token_to_id("</s>")
+
+     def __len__(self):
+         return len(self.dataset)
+
+     def __getitem__(self, idx):
+         entry = self.dataset[idx]
+         formatted = []
+         if "dialog" in entry:
+             dialog = entry["dialog"][:CONFIG["max_turns"]]
+             for i, utterance in enumerate(dialog):
+                 role_token = "<user>" if i % 2 == 0 else "<assistant>"
+                 formatted.extend([
+                     self.tokenizer.token_to_id(role_token),
+                     *self.tokenizer.encode(utterance).ids,
+                     self.turn_sep
+                 ])
+         else:
+             text = entry.get("text", "")
+             formatted.extend([
+                 self.tokenizer.token_to_id("<user>"),
+                 *self.tokenizer.encode(text).ids,
+                 self.turn_sep
+             ])
+         formatted = formatted[:self.max_length-2]
+         formatted = [self.tokenizer.token_to_id("<s>"), *formatted, self.tokenizer.token_to_id("</s>")]
+         return {
+             "input_ids": formatted[:-1],
+             "labels": formatted[1:]
+         }
+
+     @staticmethod
+     def collate_fn(batch):
+         max_len = max(len(item["input_ids"]) for item in batch)
+         pad_id = Tokenizer.from_file("tokenizer/hrom_tokenizer.json").token_to_id("<pad>")
+         inputs, labels, masks = [], [], []
+         for item in batch:
+             pad_len = max_len - len(item["input_ids"])
+             inputs.append(item["input_ids"] + [pad_id] * pad_len)
+             labels.append(item["labels"] + [pad_id] * pad_len)
+             masks.append([1] * len(item["input_ids"]) + [0] * pad_len)
+         return {
+             "input_ids": torch.tensor(inputs),
+             "labels": torch.tensor(labels),
+             "attention_mask": torch.tensor(masks)
+         }
+
+ class HROMTrainer:
+     def __init__(self, model, tokenizer):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = model.to(self.device)
+         if self.device.type == "cuda":
+             self.scaler = torch.cuda.amp.GradScaler()
+         else:
+             self.scaler = None
+         self.optimizer = torch.optim.AdamW(
+             self.model.parameters(),
+             lr=CONFIG["learning_rate"],
+             fused=True if self.device.type == "cuda" else False
+         )
+         self.tokenizer = tokenizer
+
+     def train_step(self, batch):
+         self.optimizer.zero_grad()
+         autocast = torch.cuda.amp.autocast if self.device.type == "cuda" else nullcontext
+         with autocast():
+             outputs = self.model(
+                 batch["input_ids"].to(self.device),
+                 attention_mask=batch["attention_mask"].to(self.device)
+             )
+             loss = nn.CrossEntropyLoss(ignore_index=self.tokenizer.token_to_id("<pad>"))(
+                 outputs.view(-1, CONFIG["vocab_size"]),
+                 batch["labels"].view(-1).to(self.device)
+             )
+         if self.scaler is not None:
+             self.scaler.scale(loss).backward()
+             # Unscale gradients first so clipping is applied to their true magnitudes
+             self.scaler.unscale_(self.optimizer)
+             torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
+             self.scaler.step(self.optimizer)
+             self.scaler.update()
+         else:
+             loss.backward()
+             torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
+             self.optimizer.step()
+         return loss.item()
+
+ class SafetyManager:
+     def __init__(self, model, tokenizer):
+         self.model = model
+         self.tokenizer = tokenizer
+         self.bad_words = ["hate", "kill", "harm"]
+         self.bad_word_ids = [tokenizer.encode(w).ids for w in self.bad_words]
+
+     def content_filter(self, text):
+         tokens = self.tokenizer.encode(text).ids
+         for bad_ids in self.bad_word_ids:
+             if any(tokens[i:i+len(bad_ids)] == bad_ids for i in range(len(tokens))):
+                 return False
+         return True
+
+     def generate_safely(self, prompt, max_length=50):
+         input_ids = self.tokenizer.encode(prompt).ids
+         device = next(self.model.parameters()).device
+         for _ in range(max_length):
+             with torch.no_grad():
+                 logits = self.model(torch.tensor([input_ids]).to(device))
+             next_token = logits.argmax(-1)[:, -1].item()
+             if next_token == self.tokenizer.token_to_id("</s>"):
+                 break
+             generated = self.tokenizer.decode(input_ids + [next_token])
+             if not self.content_filter(generated):
+                 break
+             input_ids.append(next_token)
+         return self.tokenizer.decode(input_ids)
+
+     def debug_generation(self, prompt="Hello!"):
+         print("\nSafety Check Generation:")
+         response = self.generate_safely(prompt)
+         print(f"Prompt: {prompt}\nResponse: {response}")
+
+ class CheckpointManager:
+     def __init__(self):
+         self.checkpoint_dir = "checkpoints"
+         os.makedirs(self.checkpoint_dir, exist_ok=True)
+
+     def save(self, model, optimizer, step):
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         path = f"{self.checkpoint_dir}/hrom_{timestamp}_step{step}.pt"
+         torch.save({
+             "model": model.state_dict(),
+             "optimizer": optimizer.state_dict(),
+             "step": step,
+             "config": CONFIG
+         }, path)
+         self._cleanup_old_checkpoints()
+
+     def _cleanup_old_checkpoints(self):
+         checkpoints = sorted(os.listdir(self.checkpoint_dir),
+                              key=lambda x: os.path.getmtime(os.path.join(self.checkpoint_dir, x)))
+         while len(checkpoints) > CONFIG["max_checkpoints"]:
+             os.remove(os.path.join(self.checkpoint_dir, checkpoints[0]))
+             checkpoints = checkpoints[1:]
+
+ def train():
+     checkpoint_manager = CheckpointManager()
+     if not os.path.exists("tokenizer/hrom_tokenizer.json"):
+         print("Training tokenizer...")
+         tokenizer_trainer = TokenizerTrainer()
+         tokenizer_trainer.train(CONFIG["dataset"])
+
+     tokenizer = Tokenizer.from_file("tokenizer/hrom_tokenizer.json")
+     model = HROM()
+     print("Downloading and caching the dataset...")
+     _ = load_dataset(CONFIG["dataset"], split="train", download_mode="reuse_cache_if_exists")
+
+     dataset = ChatDataset(tokenizer)
+     dataloader = DataLoader(
+         dataset,
+         batch_size=CONFIG["batch_size"],
+         collate_fn=ChatDataset.collate_fn
+     )
+
+     trainer_obj = HROMTrainer(model, tokenizer)
+     safety = SafetyManager(model, tokenizer)
+
+     step = 0
+     model.train()
+     for epoch in range(CONFIG["num_epochs"]):
+         for batch in dataloader:
+             loss = trainer_obj.train_step(batch)
+             if step % CONFIG["checkpoint_interval"] == 0:
+                 checkpoint_manager.save(model, trainer_obj.optimizer, step)
+                 safety.debug_generation()
+             if step % CONFIG["debug_interval"] == 0:
+                 print(f"Step {step} | Loss: {loss:.4f}")
+                 safety.debug_generation("What's the meaning of life?")
+             step += 1
+
+ if __name__ == "__main__":
+     train()
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2025 Timur Hromek
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
app.py ADDED
@@ -0,0 +1,104 @@
+ import gradio as gr
+ import torch
+ from tokenizers import Tokenizer
+ import os
+ from HROM_Trainer import HROM, CONFIG, SafetyManager
+
+ def load_latest_checkpoint(model, device):
+     checkpoint_dir = "checkpoints"
+     checkpoints = [f for f in os.listdir(checkpoint_dir) if f.endswith(".pt")]
+     if not checkpoints:
+         raise FileNotFoundError("No checkpoints found.")
+     checkpoints = sorted(checkpoints, key=lambda x: os.path.getmtime(os.path.join(checkpoint_dir, x)), reverse=True)
+     latest_checkpoint = os.path.join(checkpoint_dir, checkpoints[0])
+     checkpoint = torch.load(latest_checkpoint, map_location=device)
+     model.load_state_dict(checkpoint['model'])
+     return model
+
+ def generate_response(model, tokenizer, input_ids, safety_manager, max_length=200):
+     device = next(model.parameters()).device
+     generated_ids = input_ids.copy()
+     for _ in range(max_length):
+         input_tensor = torch.tensor([generated_ids], device=device)
+         with torch.no_grad():
+             logits = model(input_tensor)
+         next_token = logits.argmax(-1)[:, -1].item()
+         if next_token == tokenizer.token_to_id("</s>"):
+             break
+         current_text = tokenizer.decode(generated_ids + [next_token])
+         if not safety_manager.content_filter(current_text):
+             break
+         generated_ids.append(next_token)
+     return generated_ids[len(input_ids):]
+
+ # Initialize components once
+ tokenizer = Tokenizer.from_file("tokenizer/hrom_tokenizer.json")
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = HROM().to(device)
+ model = load_latest_checkpoint(model, device)
+ model.eval()
+ safety = SafetyManager(model, tokenizer)
+ max_response_length = 200
+
+ def process_message(user_input, chat_history, token_history):
+     # Process user input
+     user_turn = f"<user> {user_input} </s>"
+     user_tokens = tokenizer.encode(user_turn).ids
+     token_history.extend(user_tokens)
+
+     # Prepare input sequence
+     input_sequence = [tokenizer.token_to_id("<s>")] + token_history
+
+     # Truncate if needed
+     max_input_len = CONFIG["max_seq_len"] - max_response_length
+     if len(input_sequence) > max_input_len:
+         input_sequence = input_sequence[-max_input_len:]
+         token_history = input_sequence[1:]
+
+     # Generate response
+     response_ids = generate_response(model, tokenizer, input_sequence, safety, max_response_length)
+
+     # Process assistant response
+     assistant_text = "I couldn't generate a proper response."
+     if response_ids:
+         if response_ids[0] == tokenizer.token_to_id("<assistant>"):
+             try:
+                 end_idx = response_ids.index(tokenizer.token_to_id("</s>"))
+                 assistant_text = tokenizer.decode(response_ids[1:end_idx])
+                 token_history.extend(response_ids[:end_idx+1])
+             except ValueError:
+                 assistant_text = tokenizer.decode(response_ids[1:])
+                 token_history.extend(response_ids)
+         else:
+             assistant_text = tokenizer.decode(response_ids)
+             token_history.extend(response_ids)
+
+     chat_history.append((user_input, assistant_text))
+     return chat_history, token_history
+
+ def clear_history():
+     return [], []
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# HROM Chatbot")
+     chatbot = gr.Chatbot(height=500)
+     msg = gr.Textbox(label="Your Message")
+     token_state = gr.State([])
+
+     msg.submit(
+         process_message,
+         [msg, chatbot, token_state],
+         [chatbot, token_state],
+         queue=False
+     ).then(
+         lambda: "", None, msg
+     )
+
+     clear_btn = gr.Button("Clear Chat History")
+     clear_btn.click(
+         clear_history,
+         outputs=[chatbot, token_state],
+         queue=False
+     )
+
+ demo.launch()
tokenizer/hrom_tokenizer.json ADDED
The diff for this file is too large to render.