Commit #1.2: ipynb files
- GEM_1o.ipynb +1 -0
- GEM_Testings.ipynb +1 -0
GEM_1o.ipynb
ADDED
@@ -0,0 +1 @@
{"cells":[{"cell_type":"markdown","metadata":{"id":"eIWTEqeNxpUK"},"source":["# **GEM-1o**"]},{"cell_type":"markdown","metadata":{"id":"JUGD6ddqA-zk"},"source":["---"]},{"cell_type":"markdown","metadata":{"id":"23R8xpZrAyWs"},"source":["## Model Architecture Design + Model Training\n","\n","---\n","\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":3797,"status":"ok","timestamp":1724070916029,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"F0Eug0_Jmuuf","outputId":"4003da1c-0e43-425f-cf10-a1606742b0bd"},"outputs":[{"name":"stdout","output_type":"stream","text":["Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":6,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":498,"status":"ok","timestamp":1724070920260,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"sWF2RhIim70K","outputId":"de459418-08fd-4555-94b0-20b985db25ef"},"outputs":[{"name":"stdout","output_type":"stream","text":["/content/drive/MyDrive/GEM_Project\n"]}],"source":["%cd /content/drive/MyDrive/GEM_Project/"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"7X2MaofunYk4"},"outputs":[],"source":["!touch models/__init__.py models/gem_model.py\n","!touch utils/__init__.py utils/data_preprocessing.py utils/text_generation.py\n","!touch configs/config.py\n","!touch train.py generate.py requirements.txt"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":800,"status":"ok","timestamp":1723255256180,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"xnRDishQqbg5","outputId":"77e33a67-d10d-43d6-f0b9-9ba0e7a1d20d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Overwriting requirements.txt\n"]}],"source":["%%writefile requirements.txt\n","torch\n","transformers\n","datasets\n","tensorboard\n","tokenizers\n","tqdm\n","wandb"]},{"cell_type":"code","execution_count":7,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":2283,"status":"ok","timestamp":1724070926152,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"n8CsV9y_ox7o","outputId":"ea755633-bd56-40be-f829-7d5b2292b6d8"},"outputs":[{"name":"stdout","output_type":"stream","text":["Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 1)) (2.3.0+cpu)\n","Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 2)) (4.42.4)\n","Requirement already satisfied: datasets in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 3)) (2.21.0)\n","Requirement already satisfied: tensorboard in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 4)) (2.15.2)\n","Requirement already satisfied: tokenizers in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 5)) (0.19.1)\n","Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 6)) (4.66.5)\n","Requirement already satisfied: wandb in /usr/local/lib/python3.10/dist-packages (from -r 
requirements.txt (line 7)) (0.17.7)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (3.15.4)\n","Requirement already satisfied: typing-extensions\u003e=4.8.0 in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (4.12.2)\n","Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (1.13.1)\n","Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (3.3)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (3.1.4)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch-\u003e-r requirements.txt (line 1)) (2024.6.1)\n","Requirement already satisfied: huggingface-hub\u003c1.0,\u003e=0.23.2 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (0.23.5)\n","Requirement already satisfied: numpy\u003c2.0,\u003e=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (1.26.4)\n","Requirement already satisfied: packaging\u003e=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (24.1)\n","Requirement already satisfied: pyyaml\u003e=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (6.0.2)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (2024.7.24)\n","Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (2.32.3)\n","Requirement already satisfied: safetensors\u003e=0.4.1 in /usr/local/lib/python3.10/dist-packages (from transformers-\u003e-r requirements.txt (line 2)) (0.4.4)\n","Requirement already satisfied: pyarrow\u003e=15.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (17.0.0)\n","Requirement already satisfied: dill\u003c0.3.9,\u003e=0.3.0 in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (0.3.8)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (2.1.4)\n","Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (3.5.0)\n","Requirement already satisfied: multiprocess in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (0.70.16)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets-\u003e-r requirements.txt (line 3)) (3.10.4)\n","Requirement already satisfied: absl-py\u003e=0.4 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (1.4.0)\n","Requirement already satisfied: grpcio\u003e=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (1.65.4)\n","Requirement already satisfied: google-auth\u003c3,\u003e=1.6.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (2.27.0)\n","Requirement already satisfied: google-auth-oauthlib\u003c2,\u003e=0.5 in 
/usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (1.2.1)\n","Requirement already satisfied: markdown\u003e=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (3.6)\n","Requirement already satisfied: protobuf!=4.24.0,\u003e=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (3.20.3)\n","Requirement already satisfied: setuptools\u003e=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (71.0.4)\n","Requirement already satisfied: six\u003e1.9 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (1.16.0)\n","Requirement already satisfied: tensorboard-data-server\u003c0.8.0,\u003e=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (0.7.2)\n","Requirement already satisfied: werkzeug\u003e=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard-\u003e-r requirements.txt (line 4)) (3.0.3)\n","Requirement already satisfied: click!=8.0.0,\u003e=7.1 in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (8.1.7)\n","Requirement already satisfied: docker-pycreds\u003e=0.4.0 in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (0.4.0)\n","Requirement already satisfied: gitpython!=3.1.29,\u003e=1.0.0 in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (3.1.43)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (4.2.2)\n","Requirement already satisfied: psutil\u003e=5.0.0 in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (5.9.5)\n","Requirement already satisfied: sentry-sdk\u003e=1.0.0 in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (2.13.0)\n","Requirement already satisfied: setproctitle in /usr/local/lib/python3.10/dist-packages (from wandb-\u003e-r requirements.txt (line 7)) (1.3.3)\n","Requirement already satisfied: aiohappyeyeballs\u003e=2.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (2.3.7)\n","Requirement already satisfied: aiosignal\u003e=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (1.3.1)\n","Requirement already satisfied: attrs\u003e=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (24.2.0)\n","Requirement already satisfied: frozenlist\u003e=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (1.4.1)\n","Requirement already satisfied: multidict\u003c7.0,\u003e=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (6.0.5)\n","Requirement already satisfied: yarl\u003c2.0,\u003e=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (1.9.4)\n","Requirement already satisfied: async-timeout\u003c5.0,\u003e=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp-\u003edatasets-\u003e-r requirements.txt (line 3)) (4.0.3)\n","Requirement already satisfied: gitdb\u003c5,\u003e=4.0.1 in /usr/local/lib/python3.10/dist-packages (from 
gitpython!=3.1.29,\u003e=1.0.0-\u003ewandb-\u003e-r requirements.txt (line 7)) (4.0.11)\n","Requirement already satisfied: cachetools\u003c6.0,\u003e=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth\u003c3,\u003e=1.6.3-\u003etensorboard-\u003e-r requirements.txt (line 4)) (5.4.0)\n","Requirement already satisfied: pyasn1-modules\u003e=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth\u003c3,\u003e=1.6.3-\u003etensorboard-\u003e-r requirements.txt (line 4)) (0.4.0)\n","Requirement already satisfied: rsa\u003c5,\u003e=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth\u003c3,\u003e=1.6.3-\u003etensorboard-\u003e-r requirements.txt (line 4)) (4.9)\n","Requirement already satisfied: requests-oauthlib\u003e=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib\u003c2,\u003e=0.5-\u003etensorboard-\u003e-r requirements.txt (line 4)) (2.0.0)\n","Requirement already satisfied: charset-normalizer\u003c4,\u003e=2 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etransformers-\u003e-r requirements.txt (line 2)) (3.3.2)\n","Requirement already satisfied: idna\u003c4,\u003e=2.5 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etransformers-\u003e-r requirements.txt (line 2)) (3.7)\n","Requirement already satisfied: urllib3\u003c3,\u003e=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etransformers-\u003e-r requirements.txt (line 2)) (2.0.7)\n","Requirement already satisfied: certifi\u003e=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etransformers-\u003e-r requirements.txt (line 2)) (2024.7.4)\n","Requirement already satisfied: MarkupSafe\u003e=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug\u003e=1.0.1-\u003etensorboard-\u003e-r requirements.txt (line 4)) (2.1.5)\n","Requirement already satisfied: python-dateutil\u003e=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas-\u003edatasets-\u003e-r requirements.txt (line 3)) (2.9.0.post0)\n","Requirement already satisfied: pytz\u003e=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas-\u003edatasets-\u003e-r requirements.txt (line 3)) (2024.1)\n","Requirement already satisfied: tzdata\u003e=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas-\u003edatasets-\u003e-r requirements.txt (line 3)) (2024.1)\n","Requirement already satisfied: mpmath\u003c1.4,\u003e=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy-\u003etorch-\u003e-r requirements.txt (line 1)) (1.3.0)\n","Requirement already satisfied: smmap\u003c6,\u003e=3.0.1 in /usr/local/lib/python3.10/dist-packages (from gitdb\u003c5,\u003e=4.0.1-\u003egitpython!=3.1.29,\u003e=1.0.0-\u003ewandb-\u003e-r requirements.txt (line 7)) (5.0.1)\n","Requirement already satisfied: pyasn1\u003c0.7.0,\u003e=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules\u003e=0.2.1-\u003egoogle-auth\u003c3,\u003e=1.6.3-\u003etensorboard-\u003e-r requirements.txt (line 4)) (0.6.0)\n","Requirement already satisfied: oauthlib\u003e=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib\u003e=0.7.0-\u003egoogle-auth-oauthlib\u003c2,\u003e=0.5-\u003etensorboard-\u003e-r requirements.txt (line 4)) (3.2.2)\n"]}],"source":["!pip install -r requirements.txt"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":627,"status":"ok","timestamp":1723201614949,"user":{"displayName":"Basab 
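For a rough sense of the scale these two configurations imply, the helper below is a hedged back-of-the-envelope sketch (not part of the commit; it counts only embeddings, attention projections, feed-forward weights, and the output head, ignoring biases and LayerNorms):

```python
# Hypothetical helper (not in the repo): approximate parameter count for the
# GEM architecture above: embedding + N encoder layers + fc_out head.
def approx_params(cfg):
    d, ff, L, V = cfg['D_MODEL'], cfg['D_FF'], cfg['N_LAYERS'], cfg['VOCAB_SIZE']
    per_layer = 4 * d * d + 2 * d * ff    # attention (Q, K, V, out) + feed-forward
    return V * d + L * per_layer + d * V  # embedding + layers + output head

large = {'D_MODEL': 1024, 'D_FF': 4096, 'N_LAYERS': 32, 'VOCAB_SIZE': 50000}
small = {'D_MODEL': 768, 'D_FF': 1028, 'N_LAYERS': 6, 'VOCAB_SIZE': 10000}
print(f"large: ~{approx_params(large) / 1e6:.0f}M parameters")  # ~505M
print(f"small: ~{approx_params(small) / 1e6:.0f}M parameters")  # ~39M
```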
Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"t5wEzD2WmXiH","outputId":"c139c093-48c1-478b-c002-2e4eba127b84"},"outputs":[{"name":"stdout","output_type":"stream","text":["Overwriting configs/config.py\n"]}],"source":["%%writefile configs/config.py\n","\n","import torch\n","\n","MODEL_CONFIG = {\n"," 'VOCAB_SIZE': 50000,\n"," 'D_MODEL': 1024,\n"," 'N_HEADS': 32,\n"," 'D_FF': 4096,\n"," 'N_LAYERS': 32,\n"," 'MAX_SEQ_LEN': 512,\n"," 'BATCH_SIZE': 32,\n"," 'LEARNING_RATE': 1e-4,\n"," 'NUM_EPOCHS': 20,\n"," 'DEVICE': 'cuda' if torch.cuda.is_available() else 'cpu',\n"," 'WARMUP_STEPS': 4000,\n"," 'ADAM_EPSILON': 1e-8,\n"," 'WEIGHT_DECAY': 0.01,\n"," 'GRADIENT_ACCUMULATION_STEPS': 2,\n"," 'MAX_GRAD_NORM': 1.0,\n"," 'DROPOUT': 0.1,\n","}\n","\n","TRAINING_CONFIG = {\n"," 'CHECKPOINT_SAVE_STEPS': 5000,\n"," 'LOGGING_STEPS': 100,\n"," 'EVAL_STEPS': 1000,\n"," 'SAVE_TOTAL_LIMIT': 5\n","}"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":8,"status":"ok","timestamp":1723061207270,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"uYelKN24cZEd","outputId":"80ccf27c-b69c-4d49-f12f-2e4c4c6c22c3"},"outputs":[{"name":"stdout","output_type":"stream","text":["Overwriting configs/config.py\n"]}],"source":["%%writefile configs/config.py\n","\n","import torch\n","\n","MODEL_CONFIG = {\n"," 'VOCAB_SIZE': 10000,\n"," 'D_MODEL': 768,\n"," 'N_HEADS': 6,\n"," 'D_FF': 1028,\n"," 'N_LAYERS': 6,\n"," 'MAX_SEQ_LEN': 128,\n"," 'BATCH_SIZE': 32,\n"," 'LEARNING_RATE': 1e-4,\n"," 'NUM_EPOCHS': 10,\n"," 'DEVICE': 'cuda' if torch.cuda.is_available() else 'cpu',\n"," 'WARMUP_STEPS': 4000,\n"," 'ADAM_EPSILON': 1e-8,\n"," 'WEIGHT_DECAY': 0.01,\n"," 'GRADIENT_ACCUMULATION_STEPS': 1,\n"," 'MAX_GRAD_NORM': 1.0,\n"," 'DROPOUT': 0.1,\n","}\n","\n","TRAINING_CONFIG = {\n"," 'CHECKPOINT_SAVE_STEPS': 5000,\n"," 'LOGGING_STEPS': 100,\n"," 'EVAL_STEPS': 1000,\n"," 'SAVE_TOTAL_LIMIT': 5\n","}"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":467,"status":"ok","timestamp":1723695802467,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"wZEkwAeGn1k6","outputId":"d8445ba7-225d-45b4-a583-bdcd3c8b11ff"},"outputs":[{"name":"stdout","output_type":"stream","text":["Overwriting models/gem_model.py\n"]}],"source":["%%writefile models/gem_model.py\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import math\n","\n","class PositionalEncoding(nn.Module):\n"," def __init__(self, d_model, max_len=512, dropout=0.1):\n"," super(PositionalEncoding, self).__init__()\n"," self.dropout = nn.Dropout(p=dropout)\n","\n"," position = torch.arange(0, max_len).unsqueeze(1)\n"," div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))\n"," pe = torch.zeros(max_len, 1, d_model)\n"," pe[:, 0, 0::2] = torch.sin(position * div_term)\n"," pe[:, 0, 1::2] = torch.cos(position * div_term)\n"," self.register_buffer('pe', pe)\n","\n"," def forward(self, x):\n"," x = x + self.pe[:x.size(0), :]\n"," return self.dropout(x)\n","\n","class GEM(nn.Module):\n"," def __init__(self, vocab_size, d_model, n_heads, d_ff, n_layers, dropout=0.1):\n"," super(GEM, self).__init__()\n"," self.embedding = nn.Embedding(vocab_size, d_model)\n"," self.positional_encoding = PositionalEncoding(d_model, dropout=dropout)\n"," encoder_layers = nn.TransformerEncoderLayer(d_model, n_heads, d_ff, 
```python
%%writefile utils/text_generation.py

import torch

def generate_text(model, tokenizer, prompt, max_length=50, device='cuda'):
    model.eval()
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)

    with torch.no_grad():
        for _ in range(max_length):
            outputs = model(input_ids)
            next_token_logits = outputs[:, -1, :]
            # NOTE: argmax over (batch, vocab) gives shape (batch,); unsqueeze(0)
            # yields (1, batch), which only works for batch size 1. unsqueeze(-1)
            # would generalize.
            next_token = torch.argmax(next_token_logits, dim=-1).unsqueeze(0)
            input_ids = torch.cat([input_ids, next_token], dim=-1)

            if next_token.item() == tokenizer.eos_token_id:
                break

    return tokenizer.decode(input_ids[0], skip_special_tokens=True)
```

Output: `Overwriting utils/text_generation.py`
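A usage sketch for this helper, assuming the tokenizer directory and the small dev config from the cells above (illustrative; an untrained model will emit noise until a checkpoint is loaded):

```python
import torch
from models.gem_model import GEM
from utils.data_preprocessing import load_tokenizer
from utils.text_generation import generate_text

device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = load_tokenizer()  # reads ./tokenizer saved by train_tokenizer
model = GEM(vocab_size=len(tokenizer), d_model=768, n_heads=6,
            d_ff=1028, n_layers=6).to(device)  # small dev config

print(generate_text(model, tokenizer, "Once upon a time",
                    max_length=20, device=device))
```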
```python
%%writefile generate.py

import torch
from models.gem_model import GEM
from utils.data_preprocessing import load_tokenizer
from configs.config import MODEL_CONFIG

def generate_text(model, tokenizer, prompt, max_length=100, temperature=0.7):
    device = torch.device(MODEL_CONFIG['DEVICE'])
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
    generated = model.generate(input_ids, max_length=max_length, temperature=temperature)
    return tokenizer.decode(generated[0], skip_special_tokens=True)

def main():
    device = torch.device(MODEL_CONFIG['DEVICE'])

    tokenizer = load_tokenizer()

    # NOTE: GEM's __init__ takes no max_seq_len argument; the sequence limit
    # lives in PositionalEncoding's max_len default.
    model = GEM(
        vocab_size=MODEL_CONFIG['VOCAB_SIZE'],
        d_model=MODEL_CONFIG['D_MODEL'],
        n_heads=MODEL_CONFIG['N_HEADS'],
        d_ff=MODEL_CONFIG['D_FF'],
        n_layers=MODEL_CONFIG['N_LAYERS'],
        dropout=MODEL_CONFIG['DROPOUT']
    ).to(device)

    checkpoint = torch.load('final_model/model.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    prompt = "Once upon a time"
    generated_text = generate_text(model, tokenizer, prompt, max_length=100)
    print(f"Generated text:\n{generated_text}")

if __name__ == "__main__":
    main()
```

Output: `Overwriting generate.py`

```python
%%writefile utils/data_preprocessing.py

import torch
from torch.utils.data import DataLoader, Dataset
from datasets import load_dataset
from transformers import AutoTokenizer

def train_tokenizer(texts, vocab_size=50000, min_frequency=2):
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer = tokenizer.train_new_from_iterator(texts, vocab_size=vocab_size, min_frequency=min_frequency)
    if tokenizer.pad_token is None:
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    tokenizer.save_pretrained("./tokenizer")
    return tokenizer

def load_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained("./tokenizer")
    if tokenizer.pad_token is None:
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    return tokenizer

class TextDataset(Dataset):
    def __init__(self, texts, tokenizer, max_length):
        self.texts = texts
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        encodings = self.tokenizer(text, truncation=True, padding='max_length', max_length=self.max_length)
        return torch.tensor(encodings['input_ids'])

def get_dataloader(dataset_name, config_name, tokenizer, max_length, batch_size):
    dataset = load_dataset(dataset_name, config_name)
    texts = dataset['train']['text'][:50]  # drop the [:50] slice for a real training run with the full vocab size
    dataset = TextDataset(texts, tokenizer, max_length)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    return dataloader
```

Output: `Overwriting utils/data_preprocessing.py`
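Since `__getitem__` returns only `input_ids`, `[PAD]` positions are attended to and scored like real tokens during training. A hedged sketch of a variant that also surfaces the attention mask (hypothetical, not in the repo):

```python
import torch
from utils.data_preprocessing import TextDataset

class TextDatasetWithMask(TextDataset):  # hypothetical variant, not in the repo
    def __getitem__(self, idx):
        encodings = self.tokenizer(self.texts[idx], truncation=True,
                                   padding='max_length', max_length=self.max_length)
        return {
            'input_ids': torch.tensor(encodings['input_ids']),
            # 1 = real token, 0 = [PAD]; invert before use as src_key_padding_mask
            'attention_mask': torch.tensor(encodings['attention_mask']),
        }
```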
```python
%%writefile train.py

import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
from transformers import get_linear_schedule_with_warmup
from utils.data_preprocessing import get_dataloader, load_tokenizer
from models.gem_model import GEM
from configs.config import MODEL_CONFIG, TRAINING_CONFIG

def train():
    wandb.init(project="GEM_Project", config=MODEL_CONFIG, mode="offline")
    print("WandB initialized in offline mode.")

    tokenizer = load_tokenizer()
    print("Tokenizer loaded.")

    dataloader = get_dataloader('wikitext', 'wikitext-2-raw-v1', tokenizer, MODEL_CONFIG['MAX_SEQ_LEN'], MODEL_CONFIG['BATCH_SIZE'])
    print("Dataloader created.")

    model = GEM(
        vocab_size=len(tokenizer),
        d_model=MODEL_CONFIG['D_MODEL'],
        n_heads=MODEL_CONFIG['N_HEADS'],
        d_ff=MODEL_CONFIG['D_FF'],
        n_layers=MODEL_CONFIG['N_LAYERS'],
        dropout=MODEL_CONFIG['DROPOUT']
    ).to(MODEL_CONFIG['DEVICE'])
    print("Model initialized.")

    optimizer = optim.AdamW(model.parameters(), lr=MODEL_CONFIG['LEARNING_RATE'], eps=MODEL_CONFIG['ADAM_EPSILON'])
    total_steps = len(dataloader) * MODEL_CONFIG['NUM_EPOCHS'] // MODEL_CONFIG['GRADIENT_ACCUMULATION_STEPS']
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=MODEL_CONFIG['WARMUP_STEPS'],
        num_training_steps=total_steps
    )
    print("Optimizer and scheduler set up.")

    # Mixed precision setup
    scaler = torch.cuda.amp.GradScaler()

    model.train()
    print("Starting training loop.")
    for epoch in range(MODEL_CONFIG['NUM_EPOCHS']):
        print(f"Epoch {epoch + 1}/{MODEL_CONFIG['NUM_EPOCHS']} started.")
        for step, batch in enumerate(tqdm(dataloader, desc=f"Epoch {epoch + 1}")):
            batch = batch.to(MODEL_CONFIG['DEVICE'])

            # Mixed precision forward pass.
            # NOTE: the loss scores outputs against the *unshifted* batch, so the
            # model learns to reproduce each input token rather than predict the
            # next one; see the shifted-label sketch after this cell.
            with torch.cuda.amp.autocast():
                outputs = model(batch)
                loss = F.cross_entropy(outputs.view(-1, outputs.size(-1)), batch.view(-1))

            # Gradient accumulation
            loss = loss / MODEL_CONFIG['GRADIENT_ACCUMULATION_STEPS']
            scaler.scale(loss).backward()

            if (step + 1) % MODEL_CONFIG['GRADIENT_ACCUMULATION_STEPS'] == 0:
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), MODEL_CONFIG['MAX_GRAD_NORM'])
                scaler.step(optimizer)
                scaler.update()
                scheduler.step()
                optimizer.zero_grad()

            if step % TRAINING_CONFIG['LOGGING_STEPS'] == 0:
                wandb.log({"loss": loss.item() * MODEL_CONFIG['GRADIENT_ACCUMULATION_STEPS']})

            # NOTE: step 0 satisfies every modulo check below, so the "validation"
            # pass (which reuses the training dataloader) and a checkpoint fire at
            # the start of each epoch.
            if step % TRAINING_CONFIG['EVAL_STEPS'] == 0:
                model.eval()
                with torch.no_grad():
                    val_loss = sum(F.cross_entropy(model(batch).view(-1, outputs.size(-1)), batch.view(-1)).item() for batch in dataloader)
                wandb.log({"val_loss": val_loss / len(dataloader)})
                model.train()

            if step % TRAINING_CONFIG['CHECKPOINT_SAVE_STEPS'] == 0:
                torch.save(model.state_dict(), f"checkpoint_{epoch}_{step}.pt")

    torch.save(model.state_dict(), "GEM_1o_Aug_15.pt")
    print("Training complete. Final model saved.")

if __name__ == "__main__":
    train()
```

Output: `Overwriting train.py`
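Two caveats about this loop: `nn.TransformerEncoder` is applied without a causal mask, so every position can attend to future tokens, and the loss compares logits with the unshifted input. The standard next-token objective shifts labels by one, as in this hedged sketch (standard practice, not the committed code):

```python
import torch
import torch.nn.functional as F

def next_token_loss(outputs: torch.Tensor, batch: torch.Tensor) -> torch.Tensor:
    """Shifted causal-LM loss: logits at position t score the token at t + 1."""
    shift_logits = outputs[:, :-1, :].contiguous()  # drop the final position's logits
    shift_labels = batch[:, 1:].contiguous()        # drop the first token as a label
    return F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)),
                           shift_labels.view(-1))
```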
\n","\u001b[34m\u001b[1mwandb\u001b[0m: Run \u001b[1m`wandb online`\u001b[0m or set \u001b[1mWANDB_MODE=online\u001b[0m to enable cloud syncing.\n","WandB initialized in offline mode.\n","Tokenizer loaded.\n","Dataloader created.\n","Model initialized.\n","Optimizer and scheduler set up.\n","Starting training loop.\n","Epoch 1/20 started.\n","Epoch 1: 100% 2/2 [02:31\u003c00:00, 75.53s/it]\n","Epoch 2/20 started.\n","Epoch 2: 100% 2/2 [02:25\u003c00:00, 72.90s/it]\n","Epoch 3/20 started.\n","Epoch 3: 100% 2/2 [02:25\u003c00:00, 72.73s/it]\n","Epoch 4/20 started.\n","Epoch 4: 100% 2/2 [02:23\u003c00:00, 71.87s/it]\n","Epoch 5/20 started.\n","Epoch 5: 100% 2/2 [02:22\u003c00:00, 71.46s/it]\n","Epoch 6/20 started.\n","Epoch 6: 100% 2/2 [02:24\u003c00:00, 72.17s/it]\n","Epoch 7/20 started.\n","Epoch 7: 100% 2/2 [02:26\u003c00:00, 73.02s/it]\n","Epoch 8/20 started.\n","Epoch 8: 100% 2/2 [02:25\u003c00:00, 72.98s/it]\n","Epoch 9/20 started.\n","Epoch 9: 100% 2/2 [02:24\u003c00:00, 72.41s/it]\n","Epoch 10/20 started.\n","Epoch 10: 100% 2/2 [02:25\u003c00:00, 72.84s/it]\n","Epoch 11/20 started.\n","Epoch 11: 100% 2/2 [02:26\u003c00:00, 73.24s/it]\n","Epoch 12/20 started.\n","Epoch 12: 100% 2/2 [02:26\u003c00:00, 73.26s/it]\n","Epoch 13/20 started.\n","Epoch 13: 100% 2/2 [02:26\u003c00:00, 73.39s/it]\n","Epoch 14/20 started.\n","Epoch 14: 100% 2/2 [02:26\u003c00:00, 73.32s/it]\n","Epoch 15/20 started.\n","Epoch 15: 100% 2/2 [02:25\u003c00:00, 72.87s/it]\n","Epoch 16/20 started.\n","Epoch 16: 100% 2/2 [02:25\u003c00:00, 72.62s/it]\n","Epoch 17/20 started.\n","Epoch 17: 100% 2/2 [02:27\u003c00:00, 73.95s/it]\n","Epoch 18/20 started.\n","Epoch 18: 100% 2/2 [02:26\u003c00:00, 73.43s/it]\n","Epoch 19/20 started.\n","Epoch 19: 100% 2/2 [02:26\u003c00:00, 73.35s/it]\n","Epoch 20/20 started.\n","Epoch 20: 100% 2/2 [02:23\u003c00:00, 71.92s/it]\n","Training complete. Final model saved.\n","\u001b[34m\u001b[1mwandb\u001b[0m: You can sync this run to the cloud by running:\n","\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[1mwandb sync /content/drive/MyDrive/GEM_Project/wandb/offline-run-20240819_123541-cx5lioht\u001b[0m\n","\u001b[34m\u001b[1mwandb\u001b[0m: Find logs at: \u001b[35m\u001b[1m./wandb/offline-run-20240819_123541-cx5lioht/logs\u001b[0m\n","\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m The new W\u0026B backend becomes opt-out in version 0.18.0; try it out with `wandb.require(\"core\")`! 
See https://wandb.me/wandb-core for more information.\n"]}],"source":["!python train.py"]},{"cell_type":"markdown","metadata":{"id":"qkQeb5PkBRnQ"},"source":["---"]},{"cell_type":"markdown","metadata":{"id":"NpswxjjNBWG9"},"source":["## Model Testing"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":439,"status":"ok","timestamp":1723695707903,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"Nmwl3gmcAl0-","outputId":"f16046ae-efa5-4d6e-91f5-b24b1958e02b"},"outputs":[{"name":"stdout","output_type":"stream","text":["Overwriting Testings/testing.py\n"]}],"source":["%%writefile Testings/testing.py\n","\n","import torch\n","import sys\n","import os\n","\n","# Add the parent directory of the model folder to the system path\n","sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../models')))\n","\n","from gem_model import GEM\n","\n","# Configuration parameters for GEM\n","vocab_size = 50001 # Example vocab size, adjust if necessary\n","d_model = 1024 # Dimension of the model\n","n_heads = 32 # Number of attention heads\n","d_ff = 4096 # Dimension of the feedforward network\n","n_layers = 32 # Number of transformer layers\n","dropout = 0.1 # Dropout rate\n","\n","# Initialize the model\n","model = GEM(vocab_size, d_model, n_heads, d_ff, n_layers, dropout)\n","\n","# Load pre-trained weights\n","model_path = '/content/drive/MyDrive/GEM_Project/GEM_1o_Aug_15.pt'\n","model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))\n","\n","# Set the model to evaluation mode\n","model.eval()\n","\n","# Define a function to convert text to token IDs (example)\n","def text_to_ids(tokenizer, text):\n"," # Implement this function based on your tokenizer's method\n"," return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))\n","\n","# Define a tokenizer or token conversion function (example placeholder)\n","class DummyTokenizer:\n"," def tokenize(self, text):\n"," # Simple tokenization example, replace with actual tokenizer\n"," return text.split()\n","\n"," def convert_tokens_to_ids(self, tokens):\n"," # Simple mapping example, replace with actual ID mapping\n"," return [ord(token[0]) % 50000 for token in tokens]\n","\n","# Initialize tokenizer\n","tokenizer = DummyTokenizer()\n","\n","# Test input\n","test_prompt = \"This is a test.\"\n","test_input_ids = torch.tensor(text_to_ids(tokenizer, test_prompt), dtype=torch.long).unsqueeze(0) # Add batch dimension\n","attention_mask = torch.ones(test_input_ids.shape, dtype=torch.bool)\n","\n","# Perform a forward pass\n","with torch.no_grad():\n"," outputs = model(test_input_ids, attention_mask)\n"," print(\"Model outputs:\")\n"," print(outputs)\n","\n","# Test the generate method\n","generation_prompt = \"Once upon a time\"\n","input_ids = torch.tensor(text_to_ids(tokenizer, generation_prompt), dtype=torch.long).unsqueeze(0) # Add batch dimension\n","generated_ids = model.generate(input_ids, max_length=10, temperature=1.0)\n","print(\"Generated IDs:\")\n","print(generated_ids)\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5582,"status":"ok","timestamp":1723695716506,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"},"user_tz":-345},"id":"UCQ_A8F4CEKg","outputId":"e1231447-0078-4407-db8b-1a5505386381"},"outputs":[{"name":"stdout","output_type":"stream","text":["Traceback (most recent call 
last):\n"," File \"/content/drive/MyDrive/GEM_Project/Testings/testing.py\", line 54, in \u003cmodule\u003e\n"," outputs = model(test_input_ids, attention_mask)\n"," File \"/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\", line 1532, in _wrapped_call_impl\n"," return self._call_impl(*args, **kwargs)\n"," File \"/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\", line 1541, in _call_impl\n"," return forward_call(*args, **kwargs)\n"," File \"/content/drive/MyDrive/GEM_Project/models/gem_model.py\", line 39, in forward\n"," x = self.transformer_encoder(x, src_key_padding_mask=attention_mask)\n"," File \"/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\", line 1532, in _wrapped_call_impl\n"," return self._call_impl(*args, **kwargs)\n"," File \"/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\", line 1541, in _call_impl\n"," return forward_call(*args, **kwargs)\n"," File \"/usr/local/lib/python3.10/dist-packages/torch/nn/modules/transformer.py\", line 372, in forward\n"," and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())):\n","RuntimeError: Mask size should match input size\n"]}],"source":["!python Testings/testing.py"]}],"metadata":{"accelerator":"TPU","colab":{"authorship_tag":"ABX9TyPYDLN40rxtEOV83XnEwfF2","collapsed_sections":["23R8xpZrAyWs"],"gpuType":"V28","mount_file_id":"1bsxLcwewZjzT0Av41Y89SL_wXayUdKG_","name":"","version":""},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
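The failure surfaces in PyTorch's mask validation on the encoder fast path. A plausible repair, shown here only as a hedged sketch rather than the committed fix, is to make the positional encoding batch-first to match the encoder, and to shape-check and invert HF-style masks explicitly:

```python
import math
import torch
import torch.nn as nn

class BatchFirstPositionalEncoding(nn.Module):
    """Sketch of a positional encoding consistent with batch_first=True."""
    def __init__(self, d_model, max_len=512, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)           # (1, max_len, d_model)
        pe[0, :, 0::2] = torch.sin(position * div_term)
        pe[0, :, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x):                               # x: (batch, seq, d_model)
        x = x + self.pe[:, :x.size(1)]                  # index by sequence length
        return self.dropout(x)

# In GEM.forward, an HF-style mask (1 = keep) would then be shape-checked and
# inverted before the encoder call:
#   assert attention_mask.shape == input_ids.shape
#   x = self.transformer_encoder(x, src_key_padding_mask=(attention_mask == 0))
```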
GEM_Testings.ipynb
ADDED
@@ -0,0 +1 @@
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyOpe6URbmt5CbVG1CVVUIr6"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"e-NVlZMV8Oh7","executionInfo":{"status":"ok","timestamp":1723254070685,"user_tz":-345,"elapsed":31968,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"}},"outputId":"f96aa546-1cf5-4890-ebdd-12197488c657"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["%cd /content/drive/MyDrive/GEM_Project/"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"3aTqbSUT8UZs","executionInfo":{"status":"ok","timestamp":1723254982318,"user_tz":-345,"elapsed":484,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"}},"outputId":"0f7b31c2-329c-43f5-8c5d-06f7cf54663d"},"execution_count":16,"outputs":[{"output_type":"stream","name":"stdout","text":["/content/drive/MyDrive/GEM_Project\n"]}]},{"cell_type":"code","source":["%%writefile /GEM_Project/Testings/testing.py\n","import torch\n","from transformers import AutoTokenizer, AutoModelForCausalLM\n","\n","# Model and tokenizer paths\n","model_path = \"/GEM_Project/GEM_1o_Aug.pt\"\n","tokenizer_path = \"/GEM_Project/tokenizer/tokenizer\"\n","\n","# Load the tokenizer\n","tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n","\n","# Load the model\n","model = AutoModelForCausalLM.from_pretrained(model_path)\n","\n","# Set the model to evaluation mode\n","model.eval()\n","\n","# Set device to GPU if available\n","device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n","model.to(device)\n","\n","# Define a function to generate text based on a prompt\n","def generate_text(prompt, max_length=50, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95, temperature=0.7):\n"," # Tokenize the input prompt\n"," input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)\n","\n"," # Generate output from the model\n"," output = model.generate(\n"," input_ids,\n"," max_length=max_length,\n"," num_return_sequences=num_return_sequences,\n"," no_repeat_ngram_size=no_repeat_ngram_size,\n"," do_sample=True,\n"," top_k=top_k,\n"," top_p=top_p,\n"," temperature=temperature\n"," )\n","\n"," # Decode the generated output\n"," generated_text = tokenizer.decode(output[0], skip_special_tokens=True)\n"," return generated_text\n","\n","# Test prompts\n","prompts = [\n"," \"The future of artificial intelligence is\",\n"," \"Once upon a time in a land far away,\",\n"," \"In the field of natural language processing,\",\n"," \"The concept of creativity in machines is\"\n","]\n","\n","# Generate and print outputs for each prompt\n","for prompt in prompts:\n"," print(f\"Prompt: {prompt}\")\n"," generated_text = generate_text(prompt)\n"," print(f\"Generated: {generated_text}\\n\")\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":530},"collapsed":true,"id":"h46NWxXr8oee","executionInfo":{"status":"error","timestamp":1723255000696,"user_tz":-345,"elapsed":503,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"}},"outputId":"22a59c5f-b5a2-4eef-b30e-3d3be9c3a780"},"execution_count":17,"outputs":[{"output_type":"stream","name":"stdout","text":["Writing 
/GEM_Project/Testings/testing.py\n"]},{"output_type":"error","ename":"FileNotFoundError","evalue":"[Errno 2] No such file or directory: '/GEM_Project/Testings/testing.py'","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)","\u001b[0;32m<ipython-input-17-0335cf91e209>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_cell_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'writefile'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'/GEM_Project/Testings/testing.py'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'import torch\\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\\n\\n# Model and tokenizer paths\\nmodel_path = \"/GEM_Project/GEM_1o_Aug.pt\"\\ntokenizer_path = \"/GEM_Project/tokenizer/tokenizer\"\\n\\n# Load the tokenizer\\ntokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\\n\\n# Load the model\\nmodel = AutoModelForCausalLM.from_pretrained(model_path)\\n\\n# Set the model to evaluation mode\\nmodel.eval()\\n\\n# Set device to GPU if available\\ndevice = torch.device(\\'cuda\\' if torch.cuda.is_available() else \\'cpu\\')\\nmodel.to(device)\\n\\n# Define a function to generate text based on a prompt\\ndef generate_text(prompt, max_length=50, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95, temperature=0.7):\\n # Tokenize the input prompt\\n input_ids = tokenizer.encode(prompt, return_tensors=\\'pt\\').to(device)\\n\\n # Generate output from the model\\n output = model.generate(\\n input_ids,\\n max_length=max_length,\\n num_return_sequences=num_return_sequences,\\n no_repeat_ngram_size=no_repeat_ngram_size,\\n do_sample=True,\\n top_k=top_k,\\n top_p=top_p,\\n temperature=temperature\\n )\\n\\n # Decode the generated output\\n generated_text = tokenizer.decode(output[0], skip_special_tokens=Tr...\n\u001b[0m","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/google/colab/_shell.py\u001b[0m in \u001b[0;36mrun_cell_magic\u001b[0;34m(self, magic_name, line, cell)\u001b[0m\n\u001b[1;32m 332\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mline\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcell\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[0mcell\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_cell_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmagic_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mline\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcell\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 335\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/IPython/core/interactiveshell.py\u001b[0m in \u001b[0;36mrun_cell_magic\u001b[0;34m(self, magic_name, line, cell)\u001b[0m\n\u001b[1;32m 2471\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuiltin_trap\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2472\u001b[0m \u001b[0margs\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mmagic_arg_s\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcell\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2473\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2474\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2475\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m<decorator-gen-98>\u001b[0m in \u001b[0;36mwritefile\u001b[0;34m(self, line, cell)\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/IPython/core/magic.py\u001b[0m in \u001b[0;36m<lambda>\u001b[0;34m(f, *a, **k)\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;31m# but it's overkill for just that one bit of state.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 186\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmagic_deco\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 187\u001b[0;31m \u001b[0mcall\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mlambda\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 188\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 189\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcallable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.10/dist-packages/IPython/core/magics/osm.py\u001b[0m in \u001b[0;36mwritefile\u001b[0;34m(self, line, cell)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 855\u001b[0m \u001b[0mmode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'a'\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m'w'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 856\u001b[0;31m \u001b[0;32mwith\u001b[0m \u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'utf-8'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 857\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcell\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/GEM_Project/Testings/testing.py'"]}]},{"cell_type":"code","source":["!python 
testing.py"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"X_02SGwK_BY6","executionInfo":{"status":"ok","timestamp":1723254885616,"user_tz":-345,"elapsed":7324,"user":{"displayName":"Basab Jha","userId":"01698150105745770629"}},"outputId":"e30ed0b8-e9c5-486d-c912-1a58701c6ae4"},"execution_count":14,"outputs":[{"output_type":"stream","name":"stdout","text":["Traceback (most recent call last):\n"," File \"/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py\", line 402, in cached_file\n"," resolved_file = hf_hub_download(\n"," File \"/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py\", line 106, in _inner_fn\n"," validate_repo_id(arg_value)\n"," File \"/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py\", line 154, in validate_repo_id\n"," raise HFValidationError(\n","huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': './tokenizer/gem_tokenizer'. Use `repo_type` argument if needed.\n","\n","The above exception was the direct cause of the following exception:\n","\n","Traceback (most recent call last):\n"," File \"/content/drive/MyDrive/GEM_Project/Testings/testing.py\", line 9, in <module>\n"," tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n"," File \"/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py\", line 826, in from_pretrained\n"," tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)\n"," File \"/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py\", line 658, in get_tokenizer_config\n"," resolved_config_file = cached_file(\n"," File \"/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py\", line 466, in cached_file\n"," raise EnvironmentError(\n","OSError: Incorrect path_or_model_id: './tokenizer/gem_tokenizer'. Please provide either the path to a local folder or the repo_id of a model on the Hub.\n"]}]}]}