Chaitanya Sagar Gurujula committed on
Commit e59a831 · 1 Parent(s): c3f5954

added initial version

Dockerfile ADDED
@@ -0,0 +1,10 @@
+ FROM python:3.9-slim
+
+ WORKDIR /app
+
+ COPY requirements.txt .
+ RUN pip install -r requirements.txt
+
+ COPY src/ .
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
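The image installs the pinned requirements and serves the FastAPI app from src/ on port 7860. A minimal smoke test of the running container, assuming the image has been built and started with that port published on localhost (the image name and port mapping here are hypothetical, e.g. docker build -t text-generator . followed by docker run -p 7860:7860 text-generator):

import json
import urllib.request

# Post a prompt to the /generate/ endpoint defined in src/app.py and print the JSON reply.
payload = json.dumps({"text": "Once upon a time"}).encode("utf-8")
req = urllib.request.Request(
    "http://localhost:7860/generate/",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # expected keys: "input_text", "generated_text"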
gpt-transformers.ipynb ADDED
@@ -0,0 +1 @@
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":10477901,"sourceType":"datasetVersion","datasetId":6487916}],"dockerImageVersionId":30840,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input/transformers-input/input.txt'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"execution":{"iopub.status.busy":"2025-01-15T14:34:35.524291Z","iopub.execute_input":"2025-01-15T14:34:35.524674Z","iopub.status.idle":"2025-01-15T14:34:36.015800Z","shell.execute_reply.started":"2025-01-15T14:34:35.524644Z","shell.execute_reply":"2025-01-15T14:34:36.014727Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"# Solving for residual std scaling issue\nimport os\nimport math\nimport time\nimport inspect\nfrom dataclasses import dataclass\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:14.295213Z","iopub.execute_input":"2025-01-16T01:11:14.295523Z","iopub.status.idle":"2025-01-16T01:11:17.362144Z","shell.execute_reply.started":"2025-01-16T01:11:14.295499Z","shell.execute_reply":"2025-01-16T01:11:17.361475Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"class CausalSelfAttention(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n # key, query, value projections for all heads, but in a batch\n self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)\n # output projection\n self.c_proj = nn.Linear(config.n_embd, config.n_embd)\n self.c_proj.NANGPT_SCALE_INIT = 1\n # regularization\n self.n_head = config.n_head\n self.n_embd = config.n_embd\n self.register_buffer(\"bias\", torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size))\n\n def forward(self, x):\n B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n # nh is \"number of heads\", hs is \"head size\", and C (number of channels) = nh * hs\n # e.g. 
in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer\n qkv = self.c_attn(x)\n q, k, v = qkv.split(self.n_embd, dim=2)\n k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n # output projection\n y = self.c_proj(y)\n return y\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:17.363161Z","iopub.execute_input":"2025-01-16T01:11:17.363491Z","iopub.status.idle":"2025-01-16T01:11:17.370629Z","shell.execute_reply.started":"2025-01-16T01:11:17.363471Z","shell.execute_reply":"2025-01-16T01:11:17.369710Z"}},"outputs":[],"execution_count":2},{"cell_type":"code","source":"class MLP(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)\n self.gelu = nn.GELU(approximate='tanh')\n self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)\n self.c_proj.NANOGPT_SCALE_INIT = 1\n\n def forward(self, x):\n x = self.c_fc(x)\n x = self.gelu(x)\n x = self.c_proj(x)\n return x","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:19.837235Z","iopub.execute_input":"2025-01-16T01:11:19.837640Z","iopub.status.idle":"2025-01-16T01:11:19.843845Z","shell.execute_reply.started":"2025-01-16T01:11:19.837608Z","shell.execute_reply":"2025-01-16T01:11:19.842822Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"class Block(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.ln_1 = nn.LayerNorm(config.n_embd)\n self.attn = CausalSelfAttention(config)\n self.ln_2 = nn.LayerNorm(config.n_embd)\n self.mlp = MLP(config)\n\n def forward(self, x):\n x = x + self.attn(self.ln_1(x))\n x = x + self.mlp(self.ln_2(x))\n return x","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:22.456129Z","iopub.execute_input":"2025-01-16T01:11:22.456449Z","iopub.status.idle":"2025-01-16T01:11:22.461192Z","shell.execute_reply.started":"2025-01-16T01:11:22.456421Z","shell.execute_reply":"2025-01-16T01:11:22.460364Z"}},"outputs":[],"execution_count":4},{"cell_type":"code","source":"@dataclass\nclass GPTConfig:\n block_size: int = 1024 # max sequence length\n vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 bytes tokens + 1 <|endoftext|> token\n n_layer: int = 12 # number of layers\n n_head: int = 12 # number of heads\n n_embd: int = 768 # embedding dimension","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:24.402909Z","iopub.execute_input":"2025-01-16T01:11:24.403223Z","iopub.status.idle":"2025-01-16T01:11:24.407719Z","shell.execute_reply.started":"2025-01-16T01:11:24.403198Z","shell.execute_reply":"2025-01-16T01:11:24.406984Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"class GPT(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.transformer = nn.ModuleDict(dict(\n wte = nn.Embedding(config.vocab_size, config.n_embd),\n wpe = nn.Embedding(config.block_size, config.n_embd),\n h = nn.ModuleList([Block(config) for 
_ in range(config.n_layer)]),\n ln_f = nn.LayerNorm(config.n_embd),\n ))\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n # weight sharing\n self.transformer.wte.weight = self.lm_head.weight\n\n # weight initialization\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n std = 0.02\n if hasattr(module, 'NANGPT_SCALE_INIT'):\n std *= (2 * self.config.n_layer) ** -0.5\n torch.nn.init.normal_(module.weight, mean = 0.0, std = std)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)\n\n\n\n def forward(self, idx, targets=None):\n # idx is of shape (B, T)\n B, T = idx.size()\n assert T <= self.config.block_size, f\"Cannot forward sequence of length {T}, block size is only {self.config.block_size}\"\n # forward the token and posisition embeddings\n pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)\n pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)\n tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)\n x = tok_emb + pos_emb\n # forward the blocks of the transformer\n for block in self.transformer.h:\n x = block(x)\n # forward the final layernorm and the classifier\n x = self.transformer.ln_f(x)\n logits = self.lm_head(x) # (B, T, vocab_size)\n loss = None\n if targets is not None:\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n return logits, loss\n\n @classmethod\n def from_pretrained(cls, model_type):\n \"\"\"Loads pretrained GPT-2 model weights from huggingface\"\"\"\n assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}\n from transformers import GPT2LMHeadModel\n print(\"loading weights from pretrained gpt: %s\" % model_type)\n\n # n_layer, n_head and n_embd are determined from model_type\n config_args = {\n 'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params\n 'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params\n 'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params\n 'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params\n }[model_type]\n config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints\n config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints\n # create a from-scratch initialized minGPT model\n config = GPTConfig(**config_args)\n model = GPT(config)\n sd = model.state_dict()\n sd_keys = sd.keys()\n sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param\n\n # init a huggingface/transformers model\n model_hf = GPT2LMHeadModel.from_pretrained(model_type)\n sd_hf = model_hf.state_dict()\n\n # copy while ensuring all of the parameters are aligned and match in names and shapes\n sd_keys_hf = sd_hf.keys()\n sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer\n sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)\n transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']\n # basically the openai checkpoints use a \"Conv1D\" module, but we only want to use a vanilla Linear\n # this means that we have to transpose these weights when we import them\n assert len(sd_keys_hf) == len(sd_keys), f\"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}\"\n for k in sd_keys_hf:\n if 
any(k.endswith(w) for w in transposed):\n # special treatment for the Conv1D weights we need to transpose\n assert sd_hf[k].shape[::-1] == sd[k].shape\n with torch.no_grad():\n sd[k].copy_(sd_hf[k].t())\n else:\n # vanilla copy over the other parameters\n assert sd_hf[k].shape == sd[k].shape\n with torch.no_grad():\n sd[k].copy_(sd_hf[k])\n\n return model","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:26.530758Z","iopub.execute_input":"2025-01-16T01:11:26.531068Z","iopub.status.idle":"2025-01-16T01:11:26.543967Z","shell.execute_reply.started":"2025-01-16T01:11:26.531045Z","shell.execute_reply":"2025-01-16T01:11:26.542879Z"}},"outputs":[],"execution_count":6},{"cell_type":"code","source":"# model = GPT.from_pretrained('gpt2')\n\ndevice = 'cpu'\nif torch.cuda.is_available():\n device = 'cuda'\nelif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n device = \"mps\"\nprint(f\"using device: {device}\")\n\n# SEED\ntorch.manual_seed(1337)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(1337)\n\n# STOP\nnum_return_sequences = 5\nmax_length = 30","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:31.096650Z","iopub.execute_input":"2025-01-16T01:11:31.096934Z","iopub.status.idle":"2025-01-16T01:11:31.154262Z","shell.execute_reply.started":"2025-01-16T01:11:31.096913Z","shell.execute_reply":"2025-01-16T01:11:31.153422Z"}},"outputs":[{"name":"stdout","text":"using device: cuda\n","output_type":"stream"}],"execution_count":7},{"cell_type":"code","source":"!pip install tiktoken","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:33.747825Z","iopub.execute_input":"2025-01-16T01:11:33.748149Z","iopub.status.idle":"2025-01-16T01:11:38.202233Z","shell.execute_reply.started":"2025-01-16T01:11:33.748124Z","shell.execute_reply":"2025-01-16T01:11:38.201182Z"}},"outputs":[{"name":"stdout","text":"Requirement already satisfied: tiktoken in /usr/local/lib/python3.10/dist-packages (0.8.0)\nRequirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2024.11.6)\nRequirement already satisfied: requests>=2.26.0 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2.32.3)\nRequirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.4.0)\nRequirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.10)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2.2.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2024.12.14)\n","output_type":"stream"}],"execution_count":8},{"cell_type":"code","source":"import tiktoken\n\nclass DataLoaderLite:\n def __init__(self, B, T):\n self.B = B\n self.T = T\n\n # at init load tokens from disk and store them in memory\n with open('/kaggle/input/transformers-input/input.txt', 'r') as f:\n text = f.read()\n enc = tiktoken.get_encoding('gpt2') \n tokens = enc.encode(text)\n self.tokens = torch.tensor(tokens)\n print(f'loaded {len(self.tokens)} tokens')\n print(f'1 epoch = {len(self.tokens) // (B * T)} batches')\n\n # state\n self.current_position = 0\n \n def next_batch(self):\n B, T = self.B, self.T\n buf = self.tokens[self.current_position: self.current_position + B * T + 1]\n x = (buf[:-1]).view(B, T) # inputs\n y = 
(buf[1:]).view(B, T) # targets\n # advance the position in the tensor\n self.current_position += B*T\n # if loading the next batch would be out of bounds, reset\n if self.current_position + (B * T + 1) > len(self.tokens):\n self.current_position = 0\n return x, y\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:40.943438Z","iopub.execute_input":"2025-01-16T01:11:40.943746Z","iopub.status.idle":"2025-01-16T01:11:40.988785Z","shell.execute_reply.started":"2025-01-16T01:11:40.943722Z","shell.execute_reply":"2025-01-16T01:11:40.988104Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"import logging\n\n# Configure logging\nlogging.basicConfig(filename='/kaggle/working/training_log.txt', level=logging.INFO, \n format='%(asctime)s - %(levelname)s - %(message)s', force=True)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:43.834394Z","iopub.execute_input":"2025-01-16T01:11:43.834693Z","iopub.status.idle":"2025-01-16T01:11:43.839266Z","shell.execute_reply.started":"2025-01-16T01:11:43.834671Z","shell.execute_reply":"2025-01-16T01:11:43.838356Z"}},"outputs":[],"execution_count":10},{"cell_type":"code","source":"model = GPT(GPTConfig())\nmodel.to(device)\n\n# ... existing model initialization ...\n\n# Get model summary and parameter count\ntotal_params = sum(p.numel() for p in model.parameters())\ntrainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nlogging.info(f'Total Parameters: {total_params:,}')\nlogging.info(f'Trainable Parameters: {trainable_params:,}')\n\n\ntrain_loader = DataLoaderLite(B = 4, T = 256)\noptimizer = torch.optim.AdamW(model.parameters(), lr = 3e-4)\n\n# Track best loss for model saving\nbest_loss = float('inf')\nbest_model_path = '/kaggle/working/best_model.pth'\ntarget_loss = 0.099999\nmax_iterations = 1000000 # Safety limit to prevent infinite loops\n\ni = 0\nwhile True:\n x, y = train_loader.next_batch()\n x, y = x.to(device), y.to(device)\n optimizer.zero_grad()\n logits, loss = model(x, y)\n loss.backward()\n optimizer.step()\n \n # Save best model\n if loss.item() < best_loss:\n best_loss = loss.item()\n torch.save({\n 'epoch': i,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': best_loss,\n }, best_model_path)\n \n # Replace print statements with logging\n logging.info(f'step {i}, loss: {loss.item():.6f}, best loss: {best_loss:.6f}')\n \n # Check if target loss is reached\n if loss.item() <= target_loss:\n logging.info(f'\\nTarget loss reached! 
Training completed at step {i}')\n break\n \n # Safety check to prevent infinite training\n if i >= max_iterations:\n logging.info('\\nMaximum iterations reached without hitting target loss')\n break\n \n i += 1\n\nlogging.info(f'\\nTraining completed!')\nlogging.info(f'Final loss: {loss.item():.6f}')\nlogging.info(f'Best loss achieved: {best_loss:.6f}')\nlogging.info(f'Best model saved to: {best_model_path}')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T01:11:51.706858Z","iopub.execute_input":"2025-01-16T01:11:51.707283Z","iopub.status.idle":"2025-01-16T02:14:36.050844Z","shell.execute_reply.started":"2025-01-16T01:11:51.707251Z","shell.execute_reply":"2025-01-16T02:14:36.049681Z"}},"outputs":[{"name":"stdout","text":"loaded 338025 tokens\n1 epoch = 330 batches\n","output_type":"stream"}],"execution_count":11},{"cell_type":"code","source":"import sys;\n\ntorch.manual_seed(42)\ntorch.cuda.manual_seed(42)\nwhile x.size(1) < max_length:\n # forward the model to get the logits\n with torch.no_grad():\n logits = model(x)[0] # (B, T, vocab_size)\n # take the logits at the last position\n logits = logits[:, -1, :] # (B, vocab_size)\n # get the probabilities\n probs = F.softmax(logits, dim=-1)\n # do top-k sampling of 50 (huggingface pipeline default)\n # topk_probs here becomes (5, 50), topk_indices is (5, 50)\n topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)\n # select a token from the top-k probabilities\n # note: multinomial does not demand the input to sum to 1\n ix = torch.multinomial(topk_probs, 1) # (B, 1)\n # gather the corresponding indices\n xcol = torch.gather(topk_indices, -1, ix) # (B, 1)\n # append to the sequence\n x = torch.cat((x, xcol), dim=1)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T02:21:27.100740Z","iopub.execute_input":"2025-01-16T02:21:27.101343Z","iopub.status.idle":"2025-01-16T02:21:27.109634Z","shell.execute_reply.started":"2025-01-16T02:21:27.101308Z","shell.execute_reply":"2025-01-16T02:21:27.108902Z"}},"outputs":[],"execution_count":12},{"cell_type":"code","source":"enc = tiktoken.get_encoding('gpt2') \n# print the generated text\nfor i in range(num_return_sequences-1):\n tokens = x[i, :max_length].tolist()\n decoded = enc.decode(tokens)\n print(\">\", decoded)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-01-16T02:29:15.075790Z","iopub.execute_input":"2025-01-16T02:29:15.076153Z","iopub.status.idle":"2025-01-16T02:29:15.082856Z","shell.execute_reply.started":"2025-01-16T02:29:15.076121Z","shell.execute_reply":"2025-01-16T02:29:15.082202Z"}},"outputs":[{"name":"stdout","text":"> braved me in this matter so?\n\nBAPTISTA:\nWhy, tell me, is not this my Cambio?\n\n> ; thy father will not frown.\n\nGREMIO:\nMy cake is dough; but I'll in among the rest,\nOut of\n> scapes and perils overblown.\nMy fair Bianca, bid my father welcome,\nWhile I with self-same kindness welcome thine\n> afeard of you.\n\nWidow:\nHe that is giddy thinks the world turns round.\n\nPETRUCH\n","output_type":"stream"}],"execution_count":18},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
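The notebook's training loop writes its best checkpoint to /kaggle/working/best_model.pth as a dict with 'epoch', 'model_state_dict', 'optimizer_state_dict' and 'loss'. A minimal sketch of restoring that checkpoint for inference outside Kaggle, assuming the file has been downloaded locally and src/model.py is importable (this mirrors load_model() in src/app.py below):

import torch
from model import GPT, GPTConfig  # assumes src/ is on the Python path

# Rebuild the architecture, then load the weights saved by the training loop.
checkpoint = torch.load("best_model.pth", map_location="cpu", weights_only=True)
model = GPT(GPTConfig())
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()
print(f"restored step {checkpoint['epoch']}, loss {checkpoint['loss']:.6f}")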
input.txt ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ fastapi==0.68.0
+ uvicorn==0.15.0
+ jinja2==3.0.1
+ tiktoken
+ torch
+ transformers
+ aiofiles
+ fastapi
src/__pycache__/model.cpython-312.pyc ADDED
Binary file (18.3 kB). View file
 
src/app.py ADDED
@@ -0,0 +1,111 @@
+ import torch
+ import os
+ from fastapi import FastAPI, Request
+ from pydantic import BaseModel
+ from transformers import PreTrainedModel, AutoConfig
+ from huggingface_hub import hf_hub_download
+ import tiktoken
+ from model import GPT, GPTConfig
+ from fastapi.templating import Jinja2Templates
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import HTMLResponse
+ from fastapi.staticfiles import StaticFiles
+
+ # Get the absolute path to the templates directory
+ TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), "templates")
+
+ MODEL_ID = "sagargurujula/text-generator"
+
+ # Initialize FastAPI
+ app = FastAPI(title="GPT Text Generator")
+
+ # Templates with absolute path
+ templates = Jinja2Templates(directory=TEMPLATES_DIR)
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Set device
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ # Load model from Hugging Face Hub
+ def load_model():
+     try:
+         # Download the model file from HF Hub
+         model_path = hf_hub_download(
+             repo_id=MODEL_ID,
+             filename="best_model.pth"
+         )
+
+         # Initialize our custom GPT model
+         model = GPT(GPTConfig())
+
+         # Load the state dict
+         checkpoint = torch.load(model_path, map_location=device, weights_only=True)
+         model.load_state_dict(checkpoint['model_state_dict'])
+
+         model.to(device)
+         model.eval()
+         return model
+
+     except Exception as e:
+         print(f"Error loading model: {e}")
+         raise
+
+ # Load the model
+ model = load_model()
+
+ # Define the request body
+ class TextInput(BaseModel):
+     text: str
+
+ @app.post("/generate/")
+ async def generate_text(input: TextInput):
+     # Prepare input tensor
+     enc = tiktoken.get_encoding('gpt2')
+     input_ids = torch.tensor([enc.encode(input.text)]).to(device)
+
+     # Generate multiple tokens
+     generated_tokens = []
+     num_tokens_to_generate = 50  # Generate 50 new tokens
+
+     with torch.no_grad():
+         current_ids = input_ids
+
+         for _ in range(num_tokens_to_generate):
+             # Get model predictions
+             logits, _ = model(current_ids)
+             next_token = logits[0, -1, :].argmax().item()
+             generated_tokens.append(next_token)
+
+             # Add the new token to our current sequence
+             current_ids = torch.cat([current_ids, torch.tensor([[next_token]]).to(device)], dim=1)
+
+     # Decode all generated tokens
+     generated_text = enc.decode(generated_tokens)
+
+     # Return both input and generated text
+     return {
+         "input_text": input.text,
+         "generated_text": generated_text
+     }
+
+ # Modify the root route to serve the template
+ @app.get("/", response_class=HTMLResponse)
+ async def home(request: Request):
+     return templates.TemplateResponse(
+         "index.html",
+         {"request": request, "title": "GPT Text Generator"}
+     )
+
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="127.0.0.1", port=8080)
+
+ # To run the app, use the command: uvicorn app:app --reload
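Note that generate_text decodes greedily: each step takes the argmax of the last-position logits, so the same prompt always yields the same continuation, whereas the notebook's generation cell samples from the top 50 tokens. A sketch of a top-k sampling helper that could replace the argmax step, assuming the same model object as above (illustrative only, not part of the committed file):

import torch
from torch.nn import functional as F

def sample_next_token(model, current_ids, k=50):
    # Same forward pass as generate_text, but sample from the top-k
    # probabilities (as the notebook does) instead of taking the argmax.
    with torch.no_grad():
        logits, _ = model(current_ids)
        probs = F.softmax(logits[:, -1, :], dim=-1)
        topk_probs, topk_indices = torch.topk(probs, k, dim=-1)  # (B, k)
        ix = torch.multinomial(topk_probs, 1)                    # (B, 1)
        return torch.gather(topk_indices, -1, ix)                # (B, 1)

The returned column can then be concatenated onto current_ids with torch.cat exactly as the greedy loop does.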
src/model.py ADDED
@@ -0,0 +1,298 @@
+ # Solving for residual std scaling issue
+ import os
+ import math
+ import time
+ import inspect
+ from dataclasses import dataclass
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ import tiktoken
+ import logging
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+         self.c_proj.NANGPT_SCALE_INIT = 1
+         # regularization
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         # nh is "number of heads", hs is "head size", and C (number of channels) = nh * hs
+         # e.g. in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+         att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
+         att = F.softmax(att, dim=-1)
+         y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+         # output projection
+         y = self.c_proj(y)
+         return y
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
+         self.gelu = nn.GELU(approximate='tanh')
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
+         self.c_proj.NANOGPT_SCALE_INIT = 1
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = nn.LayerNorm(config.n_embd)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024 # max sequence length
+     vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 bytes tokens + 1 <|endoftext|> token
+     n_layer: int = 12 # number of layers
+     n_head: int = 12 # number of heads
+     n_embd: int = 768 # embedding dimension
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = nn.LayerNorm(config.n_embd),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # weight sharing
+         self.transformer.wte.weight = self.lm_head.weight
+
+         # weight initialization
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             std = 0.02
+             if hasattr(module, 'NANGPT_SCALE_INIT'):
+                 std *= (2 * self.config.n_layer) ** -0.5
+             torch.nn.init.normal_(module.weight, mean = 0.0, std = std)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)
+
+
+
+     def forward(self, idx, targets=None):
+         # idx is of shape (B, T)
+         B, T = idx.size()
+         assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+         # forward the token and position embeddings
+         pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
+         x = tok_emb + pos_emb
+         # forward the blocks of the transformer
+         for block in self.transformer.h:
+             x = block(x)
+         # forward the final layernorm and the classifier
+         x = self.transformer.ln_f(x)
+         logits = self.lm_head(x) # (B, T, vocab_size)
+         loss = None
+         if targets is not None:
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+         return logits, loss
+
+     @classmethod
+     def from_pretrained(cls, model_type):
+         """Loads pretrained GPT-2 model weights from huggingface"""
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+ class DataLoaderLite:
+     def __init__(self, B, T):
+         self.B = B
+         self.T = T
+
+         # at init load tokens from disk and store them in memory
+         with open('/kaggle/input/transformers-input/input.txt', 'r') as f:
+             text = f.read()
+         enc = tiktoken.get_encoding('gpt2')
+         tokens = enc.encode(text)
+         self.tokens = torch.tensor(tokens)
+         print(f'loaded {len(self.tokens)} tokens')
+         print(f'1 epoch = {len(self.tokens) // (B * T)} batches')
+
+         # state
+         self.current_position = 0
+
+     def next_batch(self):
+         B, T = self.B, self.T
+         buf = self.tokens[self.current_position: self.current_position + B * T + 1]
+         x = (buf[:-1]).view(B, T) # inputs
+         y = (buf[1:]).view(B, T) # targets
+         # advance the position in the tensor
+         self.current_position += B*T
+         # if loading the next batch would be out of bounds, reset
+         if self.current_position + (B * T + 1) > len(self.tokens):
+             self.current_position = 0
+         return x, y
+
+ if __name__ == "__main__":
+     # model = GPT.from_pretrained('gpt2')
+     # Configure logging
+     logging.basicConfig(filename='/kaggle/working/training_log.txt', level=logging.INFO,
+                         format='%(asctime)s - %(levelname)s - %(message)s', force=True)
+
+     device = 'cpu'
+     if torch.cuda.is_available():
+         device = 'cuda'
+     elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+         device = "mps"
+     print(f"using device: {device}")
+
+     # SEED
+     torch.manual_seed(1337)
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed(1337)
+
+     # STOP
+     num_return_sequences = 5
+     max_length = 30
+
+     model = GPT(GPTConfig())
+     model.to(device)
+
+     # ... existing model initialization ...
+
+     # Get model summary and parameter count
+     total_params = sum(p.numel() for p in model.parameters())
+     trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+     logging.info(f'Total Parameters: {total_params:,}')
+     logging.info(f'Trainable Parameters: {trainable_params:,}')
+
+
+     train_loader = DataLoaderLite(B = 4, T = 256)
+     optimizer = torch.optim.AdamW(model.parameters(), lr = 3e-4)
+
+     # Track best loss for model saving
+     best_loss = float('inf')
+     best_model_path = '/kaggle/working/best_model.pth'
+     target_loss = 0.099999
+     max_iterations = 1000000 # Safety limit to prevent infinite loops
+
+     i = 0
+     while True:
+         x, y = train_loader.next_batch()
+         x, y = x.to(device), y.to(device)
+         optimizer.zero_grad()
+         logits, loss = model(x, y)
+         loss.backward()
+         optimizer.step()
+
+         # Save best model
+         if loss.item() < best_loss:
+             best_loss = loss.item()
+             torch.save({
+                 'epoch': i,
+                 'model_state_dict': model.state_dict(),
+                 'optimizer_state_dict': optimizer.state_dict(),
+                 'loss': best_loss,
+             }, best_model_path)
+
+         # Replace print statements with logging
+         logging.info(f'step {i}, loss: {loss.item():.6f}, best loss: {best_loss:.6f}')
+
+         # Check if target loss is reached
+         if loss.item() <= target_loss:
+             logging.info(f'\nTarget loss reached! Training completed at step {i}')
+             break
+
+         # Safety check to prevent infinite training
+         if i >= max_iterations:
+             logging.info('\nMaximum iterations reached without hitting target loss')
+             break
+
+         i += 1
+
+     logging.info(f'\nTraining completed!')
+     logging.info(f'Final loss: {loss.item():.6f}')
+     logging.info(f'Best loss achieved: {best_loss:.6f}')
+     logging.info(f'Best model saved to: {best_model_path}')
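A quick sanity check of the module's forward interface, assuming a CPU-only environment and random token ids (no pretrained weights involved):

import torch
from model import GPT, GPTConfig

model = GPT(GPTConfig())
idx = torch.randint(0, 50257, (2, 16))  # dummy batch: B=2, T=16
logits, loss = model(idx, targets=idx)
print(logits.shape)  # torch.Size([2, 16, 50257])
print(sum(p.numel() for p in model.parameters()))  # roughly 124M with the wte/lm_head weight tying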
src/templates/index.html ADDED
@@ -0,0 +1,98 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+     <title>GPT Text Generator</title>
+     <style>
+         body {
+             font-family: Arial, sans-serif;
+             max-width: 800px;
+             margin: 0 auto;
+             padding: 20px;
+         }
+         textarea {
+             width: 100%;
+             height: 100px;
+             margin: 10px 0;
+             padding: 10px;
+             font-size: 16px;
+             text-align: left;
+         }
+         #result {
+             margin-top: 20px;
+             padding: 15px;
+             border: 1px solid #ddd;
+             border-radius: 5px;
+             min-height: 50px;
+             white-space: pre-wrap;
+             background-color: #f9f9f9;
+             text-align: left;
+             font-size: 16px;
+             line-height: 1.5;
+         }
+         .loading {
+             opacity: 0.5;
+         }
+         button {
+             padding: 10px 20px;
+             font-size: 16px;
+             cursor: pointer;
+             display: block;
+             margin-bottom: 20px;
+         }
+         .label {
+             font-weight: bold;
+             display: block;
+             margin-bottom: 10px;
+         }
+     </style>
+ </head>
+ <body>
+     <h1>GPT Text Generator</h1>
+     <form id="generateForm">
+         <textarea id="inputText" placeholder="Enter your text here..."></textarea>
+         <button type="submit">Generate</button>
+     </form>
+     <div id="result"></div>
+
+     <script>
+         document.getElementById('generateForm').addEventListener('submit', async (e) => {
+             e.preventDefault();
+
+             const inputText = document.getElementById('inputText').value;
+             const resultDiv = document.getElementById('result');
+             const submitButton = document.querySelector('button[type="submit"]');
+
+             // Show loading state
+             submitButton.disabled = true;
+             resultDiv.classList.add('loading');
+             resultDiv.textContent = 'Generating...';
+
+             try {
+                 const response = await fetch('/generate/', {
+                     method: 'POST',
+                     headers: {
+                         'Content-Type': 'application/json',
+                     },
+                     body: JSON.stringify({ text: inputText })
+                 });
+
+                 const data = await response.json();
+                 resultDiv.innerHTML = `
+                     <div class="label">Input:</div>
+                     ${data.input_text}
+
+                     <div class="label" style="margin-top: 20px;">Generated continuation:</div>
+                     ${data.generated_text}
+                 `;
+             } catch (error) {
+                 console.error('Error:', error);
+                 resultDiv.textContent = 'Error generating text. Please try again.';
+             } finally {
+                 // Reset loading state
+                 submitButton.disabled = false;
+                 resultDiv.classList.remove('loading');
+             }
+         });
+     </script>
+ </body>
+ </html>
train_get2-8-init.py ADDED
@@ -0,0 +1,287 @@
+ # Solving for residual std scaling issue
+ import os
+ import math
+ import time
+ import inspect
+ from dataclasses import dataclass
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+         self.c_proj.NANGPT_SCALE_INIT = 1
+         # regularization
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         # nh is "number of heads", hs is "head size", and C (number of channels) = nh * hs
+         # e.g. in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+         att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
+         att = F.softmax(att, dim=-1)
+         y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+         # output projection
+         y = self.c_proj(y)
+         return y
+
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
+         self.gelu = nn.GELU(approximate='tanh')
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
+         self.c_proj.NANOGPT_SCALE_INIT = 1
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = self.gelu(x)
+         x = self.c_proj(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = nn.LayerNorm(config.n_embd)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024 # max sequence length
+     vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 bytes tokens + 1 <|endoftext|> token
+     n_layer: int = 12 # number of layers
+     n_head: int = 12 # number of heads
+     n_embd: int = 768 # embedding dimension
+
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = nn.LayerNorm(config.n_embd),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # weight sharing
+         self.transformer.wte.weight = self.lm_head.weight
+
+         # weight initialization
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             std = 0.02
+             if hasattr(module, 'NANGPT_SCALE_INIT'):
+                 std *= (2 * self.config.n_layer) ** -0.5
+             torch.nn.init.normal_(module.weight, mean = 0.0, std = std)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)
+
+
+
+     def forward(self, idx, targets=None):
+         # idx is of shape (B, T)
+         B, T = idx.size()
+         assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+         # forward the token and position embeddings
+         pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
+         x = tok_emb + pos_emb
+         # forward the blocks of the transformer
+         for block in self.transformer.h:
+             x = block(x)
+         # forward the final layernorm and the classifier
+         x = self.transformer.ln_f(x)
+         logits = self.lm_head(x) # (B, T, vocab_size)
+         loss = None
+         if targets is not None:
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+         return logits, loss
+
+     @classmethod
+     def from_pretrained(cls, model_type):
+         """Loads pretrained GPT-2 model weights from huggingface"""
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+ # model = GPT.from_pretrained('gpt2')
+
+ device = 'cpu'
+ if torch.cuda.is_available():
+     device = 'cuda'
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+     device = "mps"
+ print(f"using device: {device}")
+
+ # SEED
+ torch.manual_seed(1337)
+ if torch.cuda.is_available():
+     torch.cuda.manual_seed(1337)
+
+ # STOP
+ num_return_sequences = 5
+ max_length = 30
+
+
+
+ import tiktoken
+
+ class DataLoaderLite:
+     def __init__(self, B, T):
+         self.B = B
+         self.T = T
+
+         # at init load tokens from disk and store them in memory
+         with open('input.txt', 'r') as f:
+             text = f.read()
+         enc = tiktoken.get_encoding('gpt2')
+         tokens = enc.encode(text)
+         self.tokens = torch.tensor(tokens)
+         print(f'loaded {len(self.tokens)} tokens')
+         print(f'1 epoch = {len(self.tokens) // (B * T)} batches')
+
+         # state
+         self.current_position = 0
+
+     def next_batch(self):
+         B, T = self.B, self.T
+         buf = self.tokens[self.current_position: self.current_position + B * T + 1]
+         x = (buf[:-1]).view(B, T) # inputs
+         y = (buf[1:]).view(B, T) # targets
+         # advance the position in the tensor
+         self.current_position += B*T
+         # if loading the next batch would be out of bounds, reset
+         if self.current_position + (B * T + 1) > len(self.tokens):
+             self.current_position = 0
+         return x, y
+
+
+ model = GPT(GPTConfig())
+ model.to(device)
+
+ train_loader = DataLoaderLite(B = 4, T = 32)
+
+ # NEW CODE
+ optimizer = torch.optim.AdamW(model.parameters(), lr = 3e-4)
+ for i in range(50):
+     x, y = train_loader.next_batch()
+     x, y = x.to(device), y.to(device)
+     optimizer.zero_grad()
+     logits, loss = model(x, y)
+     loss.backward()
+     optimizer.step()
+     print(f'step {i}, loss: {loss.item()}')
+
+
+ print(loss)
+ import sys; sys.exit(0)
+
+ torch.manual_seed(42)
+ torch.cuda.manual_seed(42)
+ while x.size(1) < max_length:
+     # forward the model to get the logits
+     with torch.no_grad():
+         logits = model(x)[0] # (B, T, vocab_size)
+         # take the logits at the last position
+         logits = logits[:, -1, :] # (B, vocab_size)
+         # get the probabilities
+         probs = F.softmax(logits, dim=-1)
+         # do top-k sampling of 50 (huggingface pipeline default)
+         # topk_probs here becomes (5, 50), topk_indices is (5, 50)
+         topk_probs, topk_indices = torch.topk(probs, 50, dim=-1)
+         # select a token from the top-k probabilities
+         # note: multinomial does not demand the input to sum to 1
+         ix = torch.multinomial(topk_probs, 1) # (B, 1)
+         # gather the corresponding indices
+         xcol = torch.gather(topk_indices, -1, ix) # (B, 1)
+         # append to the sequence
+         x = torch.cat((x, xcol), dim=1)
+
+ # print the generated text
+ for i in range(num_return_sequences):
+     tokens = x[i, :max_length].tolist()
+     decoded = enc.decode(tokens)
+     print(">", decoded)
training_log.txt ADDED
The diff for this file is too large to render. See raw diff