crumb committed
Commit a4da470 · 1 parent: 7314377

Upload GPT2_Linear_4bit_training.ipynb

Files changed (1):
  GPT2_Linear_4bit_training.ipynb +858 -0
GPT2_Linear_4bit_training.ipynb ADDED
@@ -0,0 +1,858 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU",
+ "gpuClass": "standard",
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ (auto-generated ipywidgets display state elided: HBoxModel, HTMLModel, FloatProgressModel, and their LayoutModel/StyleModel entries backing the "Loading checkpoint shards: 100% | 3/3 [00:26<00:00, 8.65s/it]" progress bar referenced by the cell outputs below)
+ }
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# `transformers` meets `bitsandbytes` for democratizing Large Language Models (LLMs) through 4bit quantization - **Fork by [crumb](https://hf.co/crumbly) for GPT2-linear-XL**\n",
+ "\n",
+ "<center>\n",
+ "<img src=\"https://github.com/huggingface/blog/blob/main/assets/96_hf_bitsandbytes_integration/Thumbnail_blue.png?raw=true\" alt=\"drawing\" width=\"700\" class=\"center\"/>\n",
+ "</center>\n",
+ "\n",
+ "Welcome to this notebook, which goes through the recent `bitsandbytes` integration: 4bit quantization techniques that introduce no performance degradation, for democratizing LLM inference and training.\n",
+ "\n",
+ "In this notebook, we will learn together how to load a large model in 4bit ~~(`gpt-neo-x-20b`)~~ (`gpt2-xl`) and train it using Google Colab and the PEFT library from Hugging Face 🤗.\n",
+ "\n",
+ "[In the general usage notebook](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf?usp=sharing), you can learn how to properly load a model in 4bit with all its variants.\n",
+ "\n",
+ "If you liked the previous work integrating [*LLM.int8*](https://arxiv.org/abs/2208.07339), you can have a look at the [introduction blogpost](https://huggingface.co/blog/hf-bitsandbytes-integration) to learn more about that quantization method.\n"
+ ],
+ "metadata": {
+ "id": "XIyP_0r6zuVc"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "FuXIFTFapAMI",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "outputId": "e9be514a-cf54-49d1-f359-6851312f4e65"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install -q -U bitsandbytes\n",
+ "!pip install -q -U git+https://github.com/huggingface/transformers.git\n",
+ "!pip install -q -U git+https://github.com/huggingface/peft.git\n",
+ "!pip install -q -U git+https://github.com/huggingface/accelerate.git\n",
+ "!pip install -q datasets\n",
+ "!pip install -q wandb"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "First, let's load the model we are going to use: GPT2-XL."
+ ],
+ "metadata": {
+ "id": "MJ-5idQwzvg-"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import torch\n",
+ "from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n",
+ "\n",
+ "# we'll use the bf16 version because it takes up 1/2 the space\n",
+ "# and is quicker to download\n",
+ "model_id = \"crumbly/gpt2-linear-xl-sharded-bf16\"\n",
+ "bnb_config = BitsAndBytesConfig(\n",
+ " load_in_4bit=True,\n",
+ " bnb_4bit_use_double_quant=True,\n",
+ " bnb_4bit_quant_type=\"nf4\",\n",
+ " bnb_4bit_compute_dtype=torch.bfloat16\n",
+ ")\n",
+ "\n",
+ "tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
+ "model = AutoModelForCausalLM.from_pretrained(model_id, device_map={\"\":0}, quantization_config=bnb_config, trust_remote_code=True)"
+ ],
+ "metadata": {
+ "id": "E0Nl5mWL0k2T",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 153,
+ "referenced_widgets": [
+ "c1f06c162a994fe39bc1c72dcd732eb5",
+ "0972d5d3a6c94e6aa5da01ac427bc98a",
+ "ad7adfc018ca4ebbbf582ea6e370dafe",
+ "5ed735ca184b45158e432a280e6c6b5c",
+ "8d9e6b2e8e3147118c319ba4788795c5",
+ "826c9c8d73d448b182343775d0004feb",
+ "cb6b93777f914372bb582e331faaae17",
+ "71cfafa9755245de98399af9ea8a1cce",
+ "f01bf5b1b1b0433388823e3d3e2f7608",
+ "839b0090d16949f8ab5ca3f550759432",
+ "9e7bcd41202041eb91035eb005e2341f"
+ ]
+ },
+ "outputId": "a550a9bf-0715-4be5-d03a-f5c348d0031b"
+ },
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "A new version of the following files was downloaded from https://huggingface.co/crumbly/gpt2-linear-xl:\n",
+ "- configuration_gpt2l.py\n",
+ ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n",
+ "A new version of the following files was downloaded from https://huggingface.co/crumbly/gpt2-linear-xl:\n",
+ "- modeling_gpt2l.py\n",
+ ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"
+ ],
+ "application/vnd.jupyter.widget-view+json": {
+ "version_major": 2,
+ "version_minor": 0,
+ "model_id": "c1f06c162a994fe39bc1c72dcd732eb5"
+ }
+ },
+ "metadata": {}
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# generate just to verify that the model works and was loaded correctly\n",
+ "inputs = {k:v.cuda() for k,v in tokenizer(\"Once upon a time,\", return_tensors='pt').items()}\n",
+ "outputs = model.generate(**inputs, max_new_tokens=32, temperature=0.7, do_sample=True)\n",
+ "tokenizer.decode(outputs[0])"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 53
+ },
+ "id": "DLZpPRoM9eb5",
+ "outputId": "3f75d30d-e097-4b24-cfce-d94e32352a7d"
+ },
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
+ ]
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "'Once upon a time, it was said that the best way to predict the future was to take the actions of past generations and predict the future. Unfortunately, this is no longer true.'"
+ ],
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ }
+ },
+ "metadata": {},
+ "execution_count": 3
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# this isn't supported yet with the GPT2 model we use, but for other models:\n",
+ "# uncomment these lines and run them\n",
+ "# from peft import prepare_model_for_kbit_training\n",
+ "# model.gradient_checkpointing_enable()\n",
+ "# model = prepare_model_for_kbit_training(model)"
+ ],
+ "metadata": {
+ "id": "a9EUEDAl0ss3"
+ },
+ "execution_count": 4,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def print_trainable_parameters(model):\n",
+ " \"\"\"\n",
+ " Prints the number of trainable parameters in the model.\n",
+ " \"\"\"\n",
+ " trainable_params = 0\n",
+ " all_param = 0\n",
+ " for _, param in model.named_parameters():\n",
+ " all_param += param.numel()\n",
+ " if param.requires_grad:\n",
+ " trainable_params += param.numel()\n",
+ " print(\n",
+ " f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n",
+ " )"
+ ],
+ "metadata": {
+ "id": "gkIcwsSU01EB"
+ },
+ "execution_count": 5,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from peft import LoraConfig, get_peft_model\n",
+ "\n",
+ "config = LoraConfig(\n",
+ " # ReLoRA uses r=128 by default in their code, but r=1 will even work to a degree\n",
+ " r=8,\n",
+ " lora_alpha=32,\n",
+ " # c_attn is our qkv\n",
+ " target_modules=[\"c_attn\"],\n",
+ " lora_dropout=0.05,\n",
+ " bias=\"none\",\n",
+ " task_type=\"CAUSAL_LM\"\n",
+ ")\n",
+ "\n",
+ "model = get_peft_model(model, config)\n",
+ "print_trainable_parameters(model)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Ybeyl20n3dYH",
+ "outputId": "c49d63f7-675f-4a81-fcf0-b6d7a0e3b688"
+ },
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "trainable params: 2457600 || all params: 822788800 || trainable%: 0.2986914746530337\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's load a dataset, Open Orca, to fine-tune our model on instruction data. We'll use newlines to separate the system prompt, question, and response for simplicity."
+ ],
+ "metadata": {
+ "id": "FCc64bfnmd3j"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ "# we'll use streaming=True so we stream examples over the internet\n",
+ "# rather than downloading the entire dataset to process\n",
+ "data = load_dataset(\"Open-Orca/OpenOrca\", streaming=True)\n",
+ "\n",
+ "def strip(batch):\n",
+ " # to remove trailing spaces or newlines from our prompts\n",
+ " return [\n",
+ " i.strip() for i in list(batch)\n",
+ " ]\n",
+ "\n",
+ "def process(batch):\n",
+ " systems = [i for i in strip(batch['system_prompt'])]\n",
+ " questions = [i for i in strip(batch['question'])]\n",
+ " responses = [i for i in strip(batch['response'])]\n",
+ " prompts = zip(systems, questions, responses)\n",
+ " prompts = [\"\\n\".join(i) for i in prompts]\n",
+ " prompts = strip(prompts)\n",
+ " return prompts\n",
+ "\n",
+ "# we'll also set the max length to something lower than normal, so we don't go out-of-memory.\n",
+ "tokenizer.model_max_length = 768\n",
+ "data = data.map(lambda samples: tokenizer(process(samples), truncation=True), batched=True)"
+ ],
+ "metadata": {
+ "id": "s6f4z8EYmcJ6"
+ },
+ "execution_count": 7,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Run the cell below to start training! For the sake of the demo, we run it for just a few steps to showcase how to use this integration with existing tools in the HF ecosystem."
+ ],
+ "metadata": {
+ "id": "_0MOtwf3zdZp"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import transformers\n",
+ "\n",
+ "# needed for gpt-neo-x tokenizer\n",
+ "tokenizer.pad_token = tokenizer.eos_token\n",
+ "\n",
+ "trainer = transformers.Trainer(\n",
+ " model=model,\n",
+ " train_dataset=data[\"train\"],\n",
+ " args=transformers.TrainingArguments(\n",
+ " # your 'effective batch size' is the product of these two numbers\n",
+ " per_device_train_batch_size=1,\n",
+ " gradient_accumulation_steps=8,\n",
+ "\n",
+ " # you can count the examples you're going to train on by\n",
+ " # multiplying max_steps by your effective batch size\n",
+ " # here we'll train on 512 examples, for example\n",
+ " max_steps=64,\n",
+ " warmup_steps=16,\n",
+ "\n",
+ " learning_rate=2e-4,\n",
+ " fp16=True,\n",
+ " logging_steps=4,\n",
+ " output_dir=\"outputs\",\n",
+ " optim=\"paged_adamw_8bit\",\n",
+ "\n",
+ " # if you want to log the loss graph to your wandb, change \"none\" to \"wandb\"\n",
+ " report_to=\"none\"\n",
+ " ),\n",
+ " data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),\n",
+ ")\n",
+ "model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n",
+ "trainer.train()"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 629
+ },
+ "id": "jq0nX33BmfaC",
+ "outputId": "910a63f5-d181-40da-f24b-c6a968fa8415"
+ },
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "You're using a GPT2TokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ],
+ "text/html": [
+ "\n",
+ " <div>\n",
+ " \n",
+ " <progress value='64' max='64' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+ " [64/64 04:43, Epoch 1/9223372036854775807]\n",
+ " </div>\n",
+ " <table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: left;\">\n",
+ " <th>Step</th>\n",
+ " <th>Training Loss</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td>4</td>\n",
+ " <td>2.849300</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>8</td>\n",
+ " <td>2.507900</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>12</td>\n",
+ " <td>2.744300</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>16</td>\n",
+ " <td>2.537700</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>20</td>\n",
+ " <td>2.808800</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>24</td>\n",
+ " <td>2.619400</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>28</td>\n",
+ " <td>2.521000</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>32</td>\n",
+ " <td>2.543500</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>36</td>\n",
+ " <td>2.439600</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>40</td>\n",
+ " <td>2.369900</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>44</td>\n",
+ " <td>2.448100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>48</td>\n",
+ " <td>2.389500</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>52</td>\n",
+ " <td>2.331100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>56</td>\n",
+ " <td>2.366500</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>60</td>\n",
+ " <td>2.401100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>64</td>\n",
+ " <td>2.153900</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table><p>"
+ ]
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "TrainOutput(global_step=64, training_loss=2.5019835233688354, metrics={'train_runtime': 303.3326, 'train_samples_per_second': 1.688, 'train_steps_per_second': 0.211, 'total_flos': 802220553600000.0, 'train_loss': 2.5019835233688354, 'epoch': 1.0})"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 8
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "To save your adapters, you can either use\n",
+ "\n",
+ "```python\n",
+ "model.save_pretrained(\"local_folder\")\n",
+ "```\n",
+ "\n",
+ "or push them to the hub with\n",
+ "\n",
+ "```python\n",
+ "model.push_to_hub(\"myusername/my_repo\")\n",
+ "```\n",
+ "\n",
+ "If you would like to merge the adapters into your model, you'll have to load the base model again without quantization, and merge them like this:\n",
+ "\n",
+ "```python\n",
+ "from peft import PeftModel\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "\n",
+ "model = AutoModelForCausalLM.from_pretrained(\"crumbly/gpt2-linear-xl-sharded-bf16\")\n",
+ "model = PeftModel.from_pretrained(model, \"myusername/my_repo\")\n",
+ "model = model.merge_and_unload()\n",
+ "```\n",
+ "\n",
+ "You can then push that to the hub or save it to a local folder like before, but including all of the weights."
+ ],
+ "metadata": {
+ "id": "NsGnWFe8mr0p"
+ }
+ }
+ ]
+ }
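
Not part of the committed notebook, but to round out the merge instructions in its last cell: a minimal sketch, under stated assumptions, of how the saved adapters could be loaded back onto the 4-bit base model for inference without merging. The adapter repo name `myusername/my_repo` is the notebook's own placeholder, and the prompt and sampling settings here are illustrative.

```python
# Minimal sketch (assumptions noted above): reload the 4-bit base model and
# attach the trained LoRA adapters with PEFT, then generate.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

base_id = "crumbly/gpt2-linear-xl-sharded-bf16"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id, device_map={"": 0}, quantization_config=bnb_config, trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "myusername/my_repo")  # placeholder repo
model.config.use_cache = True  # re-enable the cache that training turned off

# prompt follows the notebook's newline-delimited system/question format
prompt = "You are a helpful assistant.\nWhat does 4-bit quantization do?"
inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors="pt").items()}
outputs = model.generate(**inputs, max_new_tokens=64, temperature=0.7, do_sample=True)
print(tokenizer.decode(outputs[0]))
```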