{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":10477901,"sourceType":"datasetVersion","datasetId":6487916}],"dockerImageVersionId":30840,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input/transformers-input/input.txt'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"execution":{"iopub.status.busy":"2025-01-15T14:34:35.524291Z","iopub.execute_input":"2025-01-15T14:34:35.524674Z","iopub.status.idle":"2025-01-15T14:34:36.015800Z","shell.execute_reply.started":"2025-01-15T14:34:35.524644Z","shell.execute_reply":"2025-01-15T14:34:36.014727Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"# Solving for residual std scaling issue\nimport os\nimport math\nimport time\nimport inspect\nfrom dataclasses import dataclass\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:14.295213Z","iopub.execute_input":"2025-01-16T01:11:14.295523Z","iopub.status.idle":"2025-01-16T01:11:17.362144Z","shell.execute_reply.started":"2025-01-16T01:11:14.295499Z","shell.execute_reply":"2025-01-16T01:11:17.361475Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"class CausalSelfAttention(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n # key, query, value projections for all heads, but in a batch\n self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)\n # output projection\n self.c_proj = nn.Linear(config.n_embd, config.n_embd)\n self.c_proj.NANGPT_SCALE_INIT = 1\n # regularization\n self.n_head = config.n_head\n self.n_embd = config.n_embd\n self.register_buffer(\"bias\", torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size))\n\n def forward(self, x):\n B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n # nh is \"number of heads\", hs is \"head size\", and C (number of channels) = nh * hs\n # e.g. 
in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer\n qkv = self.c_attn(x)\n q, k, v = qkv.split(self.n_embd, dim=2)\n k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n # output projection\n y = self.c_proj(y)\n return y\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:17.363161Z","iopub.execute_input":"2025-01-16T01:11:17.363491Z","iopub.status.idle":"2025-01-16T01:11:17.370629Z","shell.execute_reply.started":"2025-01-16T01:11:17.363471Z","shell.execute_reply":"2025-01-16T01:11:17.369710Z"}},"outputs":[],"execution_count":2},{"cell_type":"code","source":"class MLP(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)\n self.gelu = nn.GELU(approximate='tanh')\n self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)\n self.c_proj.NANOGPT_SCALE_INIT = 1\n\n def forward(self, x):\n x = self.c_fc(x)\n x = self.gelu(x)\n x = self.c_proj(x)\n return x","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:19.837235Z","iopub.execute_input":"2025-01-16T01:11:19.837640Z","iopub.status.idle":"2025-01-16T01:11:19.843845Z","shell.execute_reply.started":"2025-01-16T01:11:19.837608Z","shell.execute_reply":"2025-01-16T01:11:19.842822Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"class Block(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.ln_1 = nn.LayerNorm(config.n_embd)\n self.attn = CausalSelfAttention(config)\n self.ln_2 = nn.LayerNorm(config.n_embd)\n self.mlp = MLP(config)\n\n def forward(self, x):\n x = x + self.attn(self.ln_1(x))\n x = x + self.mlp(self.ln_2(x))\n return x","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:22.456129Z","iopub.execute_input":"2025-01-16T01:11:22.456449Z","iopub.status.idle":"2025-01-16T01:11:22.461192Z","shell.execute_reply.started":"2025-01-16T01:11:22.456421Z","shell.execute_reply":"2025-01-16T01:11:22.460364Z"}},"outputs":[],"execution_count":4},{"cell_type":"code","source":"@dataclass\nclass GPTConfig:\n block_size: int = 1024 # max sequence length\n vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 bytes tokens + 1 <|endoftext|> token\n n_layer: int = 12 # number of layers\n n_head: int = 12 # number of heads\n n_embd: int = 768 # embedding dimension","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:24.402909Z","iopub.execute_input":"2025-01-16T01:11:24.403223Z","iopub.status.idle":"2025-01-16T01:11:24.407719Z","shell.execute_reply.started":"2025-01-16T01:11:24.403198Z","shell.execute_reply":"2025-01-16T01:11:24.406984Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"class GPT(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.transformer = nn.ModuleDict(dict(\n wte = nn.Embedding(config.vocab_size, config.n_embd),\n wpe = nn.Embedding(config.block_size, config.n_embd),\n h = nn.ModuleList([Block(config) for 
_ in range(config.n_layer)]),\n            ln_f = nn.LayerNorm(config.n_embd),\n        ))\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        # weight sharing\n        self.transformer.wte.weight = self.lm_head.weight\n\n        # weight initialization\n        self.apply(self._init_weights)\n\n    def _init_weights(self, module):\n        if isinstance(module, nn.Linear):\n            std = 0.02\n            if hasattr(module, 'NANOGPT_SCALE_INIT'):\n                std *= (2 * self.config.n_layer) ** -0.5\n            torch.nn.init.normal_(module.weight, mean = 0.0, std = std)\n            if module.bias is not None:\n                torch.nn.init.zeros_(module.bias)\n        elif isinstance(module, nn.Embedding):\n            torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)\n\n\n\n    def forward(self, idx, targets=None):\n        # idx is of shape (B, T)\n        B, T = idx.size()\n        assert T <= self.config.block_size, f\"Cannot forward sequence of length {T}, block size is only {self.config.block_size}\"\n        # forward the token and position embeddings\n        pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)\n        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)\n        tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)\n        x = tok_emb + pos_emb\n        # forward the blocks of the transformer\n        for block in self.transformer.h:\n            x = block(x)\n        # forward the final layernorm and the classifier\n        x = self.transformer.ln_f(x)\n        logits = self.lm_head(x) # (B, T, vocab_size)\n        loss = None\n        if targets is not None:\n            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n        return logits, loss\n\n    @classmethod\n    def from_pretrained(cls, model_type):\n        \"\"\"Loads pretrained GPT-2 model weights from huggingface\"\"\"\n        assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}\n        from transformers import GPT2LMHeadModel\n        print(\"loading weights from pretrained gpt: %s\" % model_type)\n\n        # n_layer, n_head and n_embd are determined from model_type\n        config_args = {\n            'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params\n            'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params\n            'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params\n            'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params\n        }[model_type]\n        config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints\n        config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints\n        # create a from-scratch initialized minGPT model\n        config = GPTConfig(**config_args)\n        model = GPT(config)\n        sd = model.state_dict()\n        sd_keys = sd.keys()\n        sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param\n\n        # init a huggingface/transformers model\n        model_hf = GPT2LMHeadModel.from_pretrained(model_type)\n        sd_hf = model_hf.state_dict()\n\n        # copy while ensuring all of the parameters are aligned and match in names and shapes\n        sd_keys_hf = sd_hf.keys()\n        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer\n        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)\n        transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']\n        # basically the openai checkpoints use a \"Conv1D\" module, but we only want to use a vanilla Linear\n        # this means that we have to transpose these weights when we import them\n        assert len(sd_keys_hf) == len(sd_keys), f\"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}\"\n        for k in sd_keys_hf:\n            if 
any(k.endswith(w) for w in transposed):\n # special treatment for the Conv1D weights we need to transpose\n assert sd_hf[k].shape[::-1] == sd[k].shape\n with torch.no_grad():\n sd[k].copy_(sd_hf[k].t())\n else:\n # vanilla copy over the other parameters\n assert sd_hf[k].shape == sd[k].shape\n with torch.no_grad():\n sd[k].copy_(sd_hf[k])\n\n return model","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:26.530758Z","iopub.execute_input":"2025-01-16T01:11:26.531068Z","iopub.status.idle":"2025-01-16T01:11:26.543967Z","shell.execute_reply.started":"2025-01-16T01:11:26.531045Z","shell.execute_reply":"2025-01-16T01:11:26.542879Z"}},"outputs":[],"execution_count":6},{"cell_type":"code","source":"# model = GPT.from_pretrained('gpt2')\n\ndevice = 'cpu'\nif torch.cuda.is_available():\n device = 'cuda'\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n device = \"mps\"\nprint(f\"using device: {device}\")\n\n# SEED\ntorch.manual_seed(1337)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(1337)\n\n# STOP\nnum_return_sequences = 5\nmax_length = 30","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:31.096650Z","iopub.execute_input":"2025-01-16T01:11:31.096934Z","iopub.status.idle":"2025-01-16T01:11:31.154262Z","shell.execute_reply.started":"2025-01-16T01:11:31.096913Z","shell.execute_reply":"2025-01-16T01:11:31.153422Z"}},"outputs":[{"name":"stdout","text":"using device: cuda\n","output_type":"stream"}],"execution_count":7},{"cell_type":"code","source":"!pip install tiktoken","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:33.747825Z","iopub.execute_input":"2025-01-16T01:11:33.748149Z","iopub.status.idle":"2025-01-16T01:11:38.202233Z","shell.execute_reply.started":"2025-01-16T01:11:33.748124Z","shell.execute_reply":"2025-01-16T01:11:38.201182Z"}},"outputs":[{"name":"stdout","text":"Requirement already satisfied: tiktoken in /usr/local/lib/python3.10/dist-packages (0.8.0)\nRequirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2024.11.6)\nRequirement already satisfied: requests>=2.26.0 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2.32.3)\nRequirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.4.0)\nRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.10)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2.2.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2024.12.14)\n","output_type":"stream"}],"execution_count":8},{"cell_type":"code","source":"import tiktoken\n\nclass DataLoaderLite:\n def __init__(self, B, T):\n self.B = B\n self.T = T\n\n # at init load tokens from disk and store them in memory\n with open('/kaggle/input/transformers-input/input.txt', 'r') as f:\n text = f.read()\n enc = tiktoken.get_encoding('gpt2') \n tokens = enc.encode(text)\n self.tokens = torch.tensor(tokens)\n print(f'loaded {len(self.tokens)} tokens')\n print(f'1 epoch = {len(self.tokens) // (B * T)} batches')\n\n # state\n self.current_position = 0\n \n def next_batch(self):\n B, T = self.B, self.T\n buf = self.tokens[self.current_position: self.current_position + B * T + 1]\n x = (buf[:-1]).view(B, T) # inputs\n y = 
(buf[1:]).view(B, T) # targets\n # advance the position in the tensor\n self.current_position += B*T\n # if loading the next batch would be out of bounds, reset\n if self.current_position + (B * T + 1) > len(self.tokens):\n self.current_position = 0\n return x, y\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:40.943438Z","iopub.execute_input":"2025-01-16T01:11:40.943746Z","iopub.status.idle":"2025-01-16T01:11:40.988785Z","shell.execute_reply.started":"2025-01-16T01:11:40.943722Z","shell.execute_reply":"2025-01-16T01:11:40.988104Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"import logging\n\n# Configure logging\nlogging.basicConfig(filename='/kaggle/working/training_log.txt', level=logging.INFO, \n format='%(asctime)s - %(levelname)s - %(message)s', force=True)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:43.834394Z","iopub.execute_input":"2025-01-16T01:11:43.834693Z","iopub.status.idle":"2025-01-16T01:11:43.839266Z","shell.execute_reply.started":"2025-01-16T01:11:43.834671Z","shell.execute_reply":"2025-01-16T01:11:43.838356Z"}},"outputs":[],"execution_count":10},{"cell_type":"code","source":"model = GPT(GPTConfig())\nmodel.to(device)\n\n# ... existing model initialization ...\n\n# Get model summary and parameter count\ntotal_params = sum(p.numel() for p in model.parameters())\ntrainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nlogging.info(f'Total Parameters: {total_params:,}')\nlogging.info(f'Trainable Parameters: {trainable_params:,}')\n\n\ntrain_loader = DataLoaderLite(B = 4, T = 256)\noptimizer = torch.optim.AdamW(model.parameters(), lr = 3e-4)\n\n# Track best loss for model saving\nbest_loss = float('inf')\nbest_model_path = '/kaggle/working/best_model.pth'\ntarget_loss = 0.099999\nmax_iterations = 1000000 # Safety limit to prevent infinite loops\n\ni = 0\nwhile True:\n x, y = train_loader.next_batch()\n x, y = x.to(device), y.to(device)\n optimizer.zero_grad()\n logits, loss = model(x, y)\n loss.backward()\n optimizer.step()\n \n # Save best model\n if loss.item() < best_loss:\n best_loss = loss.item()\n torch.save({\n 'epoch': i,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': best_loss,\n }, best_model_path)\n \n # Replace print statements with logging\n logging.info(f'step {i}, loss: {loss.item():.6f}, best loss: {best_loss:.6f}')\n \n # Check if target loss is reached\n if loss.item() <= target_loss:\n logging.info(f'\\nTarget loss reached! 
Training completed at step {i}')\n        break\n    \n    # Safety check to prevent infinite training\n    if i >= max_iterations:\n        logging.info('\\nMaximum iterations reached without hitting target loss')\n        break\n    \n    i += 1\n\nlogging.info(f'\\nTraining completed!')\nlogging.info(f'Final loss: {loss.item():.6f}')\nlogging.info(f'Best loss achieved: {best_loss:.6f}')\nlogging.info(f'Best model saved to: {best_model_path}')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:51.706858Z","iopub.execute_input":"2025-01-16T01:11:51.707283Z","iopub.status.idle":"2025-01-16T02:14:36.050844Z","shell.execute_reply.started":"2025-01-16T01:11:51.707251Z","shell.execute_reply":"2025-01-16T02:14:36.049681Z"}},"outputs":[{"name":"stdout","text":"loaded 338025 tokens\n1 epoch = 330 batches\n","output_type":"stream"}],"execution_count":11},{"cell_type":"code","source":"torch.manual_seed(42)\ntorch.cuda.manual_seed(42)\n\n# condition generation on a short prompt (\"First Citizen:\" is just an example prompt);\n# x needs to start shorter than max_length so the sampling loop below actually runs\nmodel.eval()\nenc = tiktoken.get_encoding('gpt2')\ntokens = enc.encode(\"First Citizen:\")\ntokens = torch.tensor(tokens, dtype=torch.long)\nx = tokens.unsqueeze(0).repeat(num_return_sequences, 1).to(device)\n\nwhile x.size(1) < max_length:\n    # forward the model to get the logits\n    with torch.no_grad():\n        logits = model(x)[0] # (B, T, vocab_size)\n        # take the logits at the last position\n        logits = logits[:, -1, :] # (B, vocab_size)\n        # get the probabilities\n        probs = F.softmax(logits, dim=-1)\n        # do top-k sampling of 50 (huggingface pipeline default)\n        # topk_probs here becomes (5, 50), topk_indices is (5, 50)\n        topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)\n        # select a token from the top-k probabilities\n        # note: multinomial does not demand the input to sum to 1\n        ix = torch.multinomial(topk_probs, 1) # (B, 1)\n        # gather the corresponding indices\n        xcol = torch.gather(topk_indices, -1, ix) # (B, 1)\n        # append to the sequence\n        x = torch.cat((x, xcol), dim=1)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T02:21:27.100740Z","iopub.execute_input":"2025-01-16T02:21:27.101343Z","iopub.status.idle":"2025-01-16T02:21:27.109634Z","shell.execute_reply.started":"2025-01-16T02:21:27.101308Z","shell.execute_reply":"2025-01-16T02:21:27.108902Z"}},"outputs":[],"execution_count":12},{"cell_type":"code","source":"enc = tiktoken.get_encoding('gpt2')\n# print the generated text\nfor i in range(num_return_sequences):\n    tokens = x[i, :max_length].tolist()\n    decoded = enc.decode(tokens)\n    print(\">\", decoded)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T02:29:15.075790Z","iopub.execute_input":"2025-01-16T02:29:15.076153Z","iopub.status.idle":"2025-01-16T02:29:15.082856Z","shell.execute_reply.started":"2025-01-16T02:29:15.076121Z","shell.execute_reply":"2025-01-16T02:29:15.082202Z"}},"outputs":[{"name":"stdout","text":"> braved me in this matter so?\n\nBAPTISTA:\nWhy, tell me, is not this my Cambio?\n\n> ; thy father will not frown.\n\nGREMIO:\nMy cake is dough; but I'll in among the rest,\nOut of\n> scapes and perils overblown.\nMy fair Bianca, bid my father welcome,\nWhile I with self-same kindness welcome thine\n> afeard of you.\n\nWidow:\nHe that is giddy thinks the world turns round.\n\nPETRUCH\n","output_type":"stream"}],"execution_count":18},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}