Commit 7b90450
Parent(s): 4fca63d
Upload Alpaca_+_Mistral_7b_full_example.ipynb

Alpaca_+_Mistral_7b_full_example.ipynb CHANGED
@@ -5,9 +5,9 @@
5        "source": [
6        "To run this, press \"Runtime\" and press \"Run all\" on a **free** Tesla T4 Google Colab instance!\n",
7        "<div class=\"align-center\">\n",
8    -   " <a href=\"https://github.com/unslothai/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png\"
9    -   " <a href=\"https://discord.gg/u54VK8m8tk\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Discord.png\"
10   -   " <a href=\"https://
11       "</div>\n",
12       "\n",
13       "To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://github.com/unslothai/unsloth#installation-instructions---conda).\n",
@@ -272,6 +272,15 @@
272      "dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n",
273      "load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.\n",
274      "\n",
275      "model, tokenizer = FastLanguageModel.from_pretrained(\n",
276      " model_name = \"unsloth/mistral-7b-bnb-4bit\", # \"unsloth/mistral-7b\" for 16bit loading\n",
277      " max_seq_length = max_seq_length,\n",
@@ -316,8 +325,8 @@
316      " target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
317      " \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
318      " lora_alpha = 16,\n",
319  -   " lora_dropout = 0, #
320  -   " bias = \"none\", #
321      " use_gradient_checkpointing = True,\n",
322      " random_state = 3407,\n",
323      " max_seq_length = max_seq_length,\n",
@@ -999,39 +1008,6 @@
999      "execution_count": null,
1000     "outputs": []
1001     },
1002 -   {
1003 -   "cell_type": "markdown",
1004 -   "source": [
1005 -   "To save to `GGUF` / `llama.cpp`, or for model merging, use `model.merge_and_unload` first, then save the model. See this [issue](https://github.com/ggerganov/llama.cpp/issues/3097) on llama.cpp for more info."
1006 -   ],
1007 -   "metadata": {
1008 -   "id": "TCv4vXHd61i7"
1009 -   }
1010 -   },
1011 -   {
1012 -   "cell_type": "code",
1013 -   "source": [
1014 -   "model = model.merge_and_unload()"
1015 -   ],
1016 -   "metadata": {
1017 -   "id": "xcRjsZe0RK1b",
1018 -   "colab": {
1019 -   "base_uri": "https://localhost:8080/"
1020 -   },
1021 -   "outputId": "b40d4730-b9dc-4a4b-e42c-c191060ff66c"
1022 -   },
1023 -   "execution_count": null,
1024 -   "outputs": [
1025 -   {
1026 -   "output_type": "stream",
1027 -   "name": "stderr",
1028 -   "text": [
1029 -   "/usr/local/lib/python3.10/dist-packages/peft/tuners/lora/bnb.py:229: UserWarning: Merge lora module to 4-bit linear may get different generations due to rounding errors.\n",
1030 -   " warnings.warn(\n"
1031 -   ]
1032 -   }
1033 -   ]
1034 -   },
1035     {
1036     "cell_type": "markdown",
1037     "source": [
@@ -1098,14 +1074,164 @@
1098     }
1099     ]
1100     },
1101     {
1102     "cell_type": "markdown",
1103     "source": [
1104     "And we're done! If you have any questions on Unsloth, we have a [Discord](https://discord.gg/u54VK8m8tk) channel! If you find any bugs or want to keep updated with the latest LLM stuff, or need help, join projects etc, feel free to join our Discord!\n",
1105     "<div class=\"align-center\">\n",
1106 -   " <a href=\"https://github.com/unslothai/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png\"
1107 -   " <a href=\"https://discord.gg/u54VK8m8tk\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Discord.png\"
1108 -   " <a href=\"https://
1109     "</div>"
1110     ],
1111     "metadata": {
@@ -5,9 +5,9 @@
5        "source": [
6        "To run this, press \"Runtime\" and press \"Run all\" on a **free** Tesla T4 Google Colab instance!\n",
7        "<div class=\"align-center\">\n",
8    +   " <a href=\"https://github.com/unslothai/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png\" height=\"45\"></a>\n",
9    +   " <a href=\"https://discord.gg/u54VK8m8tk\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Discord.png\" height=\"45\"></a>\n",
10   +   " <a href=\"https://ko-fi.com/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Kofi button.png\" height=\"45\"></a></a> Join our Discord if you need help!\n",
11       "</div>\n",
12       "\n",
13       "To install Unsloth on your own computer, follow the installation instructions on our Github page [here](https://github.com/unslothai/unsloth#installation-instructions---conda).\n",
@@ -272,6 +272,15 @@
272      "dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n",
273      "load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.\n",
274      "\n",
275  +   "# 4bit pre quantized models we support for 4x faster downloading!\n",
276  +   "fourbit_models = [\n",
277  +   " \"unsloth/mistral-7b-bnb-4bit\",\n",
278  +   " \"unsloth/llama-2-7b-bnb-4bit\",\n",
279  +   " \"unsloth/llama-2-13b-bnb-4bit\",\n",
280  +   " \"unsloth/codellama-34b-bnb-4bit\",\n",
281  +   " \"unsloth/tinyllama-bnb-4bit\",\n",
282  +   "]\n",
283  +   "\n",
284      "model, tokenizer = FastLanguageModel.from_pretrained(\n",
285      " model_name = \"unsloth/mistral-7b-bnb-4bit\", # \"unsloth/mistral-7b\" for 16bit loading\n",
286      " max_seq_length = max_seq_length,\n",
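For orientation, the loading cell this hunk extends boils down to the sketch below. The `max_seq_length` value and the explicit `torch.cuda.is_bf16_supported()` check are illustrative assumptions; the notebook itself passes `dtype = None` and lets Unsloth auto-detect.

```python
# Minimal sketch of the loading step, under the assumptions stated above (not a verbatim cell).
import torch
from unsloth import FastLanguageModel

max_seq_length = 2048   # assumed value; not shown in this hunk
load_in_4bit = True     # 4bit quantization so Mistral 7b fits on a free Tesla T4

# dtype = None in the notebook means "auto detect"; this is one explicit way to make that choice.
dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/mistral-7b-bnb-4bit",  # any entry from fourbit_models above also works
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
```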
@@ -316,8 +325,8 @@
325      " target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
326      " \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
327      " lora_alpha = 16,\n",
328  +   " lora_dropout = 0, # Supports any, but = 0 is optimized\n",
329  +   " bias = \"none\", # Supports any, but = \"none\" is optimized\n",
330      " use_gradient_checkpointing = True,\n",
331      " random_state = 3407,\n",
332      " max_seq_length = max_seq_length,\n",
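These two comment fixes sit inside the LoRA-attachment call. Below is a minimal sketch of the surrounding `FastLanguageModel.get_peft_model` call, assuming a rank of `r = 16` (the rank itself is not shown in this hunk); the remaining keyword arguments mirror the diff.

```python
# Sketch of the LoRA cell these comment lines belong to; r = 16 is an assumed value.
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,
    lora_dropout = 0,      # supports any value, but 0 is optimized
    bias = "none",         # supports any value, but "none" is optimized
    use_gradient_checkpointing = True,
    random_state = 3407,
    max_seq_length = max_seq_length,
)
```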
@@ -999,39 +1008,6 @@
1008     "execution_count": null,
1009     "outputs": []
1010     },
1011     {
1012     "cell_type": "markdown",
1013     "source": [
@@ -1098,14 +1074,164 @@
1074     }
1075     ]
1076     },
1077 +   {
1078 +   "cell_type": "markdown",
1079 +   "source": [
1080 +   "To save to `GGUF` / `llama.cpp`, we support it natively now! You can also go to our dedicated GGUF notebook [here](https://colab.research.google.com/drive/14DW0VwuqL2O3tqGlX7aUF6TOBA8S59M4?usp=sharing). Select either `save locally` for local saving or `save locally and quantize to 4bit` for 4bit quantization for llama.cpp / GGUF."
1081 +   ],
1082 +   "metadata": {
1083 +   "id": "TCv4vXHd61i7"
1084 +   }
1085 +   },
1086 +   {
1087 +   "cell_type": "code",
1088 +   "source": [
1089 +   "#@title Code for conversion to GGUF\n",
1090 +   "def colab_quantize_to_gguf(save_directory, quantization_method = \"q4_k_m\"):\n",
1091 +   " from transformers.models.llama.modeling_llama import logger\n",
1092 +   " import os\n",
1093 +   "\n",
1094 +   " logger.warning_once(\n",
1095 +   " \"Unsloth: `colab_quantize_to_gguf` is still in development mode.\\n\"\\\n",
1096 +   " \"If anything errors or breaks, please file a ticket on Github.\\n\"\\\n",
1097 +   " \"Also, if you used this successfully, please tell us on Discord!\"\n",
1098 +   " )\n",
1099 +   "\n",
1100 +   " # From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html\n",
1101 +   " ALLOWED_QUANTS = \\\n",
1102 +   " {\n",
1103 +   " \"q2_k\" : \"Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.\",\n",
1104 +   " \"q3_k_l\" : \"Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K\",\n",
1105 +   " \"q3_k_m\" : \"Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K\",\n",
1106 +   " \"q3_k_s\" : \"Uses Q3_K for all tensors\",\n",
1107 +   " \"q4_0\" : \"Original quant method, 4-bit.\",\n",
1108 +   " \"q4_1\" : \"Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.\",\n",
1109 +   " \"q4_k_m\" : \"Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K\",\n",
1110 +   " \"q4_k_s\" : \"Uses Q4_K for all tensors\",\n",
1111 +   " \"q5_0\" : \"Higher accuracy, higher resource usage and slower inference.\",\n",
1112 +   " \"q5_1\" : \"Even higher accuracy, resource usage and slower inference.\",\n",
1113 +   " \"q5_k_m\" : \"Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K\",\n",
1114 +   " \"q5_k_s\" : \"Uses Q5_K for all tensors\",\n",
1115 +   " \"q6_k\" : \"Uses Q8_K for all tensors\",\n",
1116 +   " \"q8_0\" : \"Almost indistinguishable from float16. High resource use and slow. Not recommended for most users.\",\n",
1117 +   " }\n",
1118 +   "\n",
1119 +   " if quantization_method not in ALLOWED_QUANTS.keys():\n",
1120 +   " error = f\"Unsloth: Quant method = [{quantization_method}] not supported. Choose from below:\\n\"\n",
1121 +   " for key, value in ALLOWED_QUANTS.items():\n",
1122 +   " error += f\"[{key}] => {value}\\n\"\n",
1123 +   " raise RuntimeError(error)\n",
1124 +   " pass\n",
1125 +   "\n",
1126 +   " print_info = \\\n",
1127 +   " f\"==((====))== Unsloth: Conversion from QLoRA to GGUF information\\n\"\\\n",
1128 +   " f\" \\\\\\ /| [0] Installing llama.cpp will take 3 minutes.\\n\"\\\n",
1129 +   " f\"O^O/ \\_/ \\\\ [1] Converting HF to GGUF 16bits will take 3 minutes.\\n\"\\\n",
1130 +   " f\"\\ / [2] Converting GGUF 16bits to q4_k_m will take 20 minutes.\\n\"\\\n",
1131 +   " f' \"-____-\" In total, you will have to wait around 26 minutes.\\n'\n",
1132 +   " print(print_info)\n",
1133 +   "\n",
1134 +   " if not os.path.exists(\"llama.cpp\"):\n",
1135 +   " print(\"Unsloth: [0] Installing llama.cpp. This will take 3 minutes...\")\n",
1136 +   " !git clone https://github.com/ggerganov/llama.cpp\n",
1137 +   " !cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j\n",
1138 +   " !pip install gguf protobuf\n",
1139 +   " pass\n",
1140 +   "\n",
1141 +   " print(\"Unsloth: [1] Converting HF into GGUF 16bit. This will take 3 minutes...\")\n",
1142 +   " !python llama.cpp/convert.py {save_directory} \\\n",
1143 +   " --outfile {save_directory}-unsloth.gguf \\\n",
1144 +   " --outtype f16\n",
1145 +   "\n",
1146 +   " print(\"Unsloth: [2] Converting GGUF 16bit into q4_k_m. This will take 20 minutes...\")\n",
1147 +   " final_location = f\"./{save_directory}-{quantization_method}-unsloth.gguf\"\n",
1148 +   " !./llama.cpp/quantize ./{save_directory}-unsloth.gguf \\\n",
1149 +   " {final_location} {quantization_method}\n",
1150 +   "\n",
1151 +   " print(f\"Unsloth: Output location: {final_location}\")\n",
1152 +   "pass\n"
1153 +   ],
1154 +   "metadata": {
1155 +   "cellView": "form",
1156 +   "id": "nCVtR2ElF1GX"
1157 +   },
1158 +   "execution_count": null,
1159 +   "outputs": []
1160 +   },
1161 +   {
1162 +   "cell_type": "code",
1163 +   "source": [
1164 +   "from unsloth import unsloth_save_model\n",
1165 +   "\n",
1166 +   "# Change to `save locally` to save a float16 GGUF file or `\"save locally and quantize to 4bit\"`\n",
1167 +   "# to quantize down to 4bit\n",
1168 +   "SAVE_STRATEGY = \"none\"\n",
1169 +   "\n",
1170 +   "if SAVE_STRATEGY == \"save locally\":\n",
1171 +   "\n",
1172 +   " unsloth_save_model(model, tokenizer, \"output_model\")\n",
1173 +   "\n",
1174 +   "elif SAVE_STRATEGY == \"save locally and quantize to 4bit\":\n",
1175 +   "\n",
1176 +   " unsloth_save_model(model, tokenizer, \"output_model\")\n",
1177 +   " colab_quantize_to_gguf(\"output_model\", quantization_method = \"q4_k_m\")"
1178 +   ],
1179 +   "metadata": {
1180 +   "id": "FqfebeAdT073"
1181 +   },
1182 +   "execution_count": null,
1183 +   "outputs": []
1184 +   },
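Not part of this commit, but a quick way to sanity-check the result: with `SAVE_STRATEGY = "save locally and quantize to 4bit"`, the helper above writes `./output_model-q4_k_m-unsloth.gguf`, which could then be loaded with, for example, the `llama-cpp-python` bindings (an assumed extra dependency that this notebook does not install).

```python
# Hypothetical smoke test of the quantized GGUF file; llama-cpp-python is an assumed dependency.
from llama_cpp import Llama

llm = Llama(model_path = "./output_model-q4_k_m-unsloth.gguf")
prompt = (
    "Below is an instruction that describes a task. Write a response that completes the request.\n\n"
    "### Instruction:\nName the capital of France.\n\n### Response:\n"
)
out = llm(prompt, max_tokens = 64)
print(out["choices"][0]["text"])
```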
1185 +   {
1186 +   "cell_type": "markdown",
1187 +   "source": [
1188 +   "Otherwise, to merge the LoRA adapters into the 4bit model:"
1189 +   ],
1190 +   "metadata": {
1191 +   "id": "acUVCgzzU1Wv"
1192 +   }
1193 +   },
1194 +   {
1195 +   "cell_type": "code",
1196 +   "source": [
1197 +   "model = model.merge_and_unload()"
1198 +   ],
1199 +   "metadata": {
1200 +   "id": "xcRjsZe0RK1b",
1201 +   "colab": {
1202 +   "base_uri": "https://localhost:8080/"
1203 +   },
1204 +   "outputId": "b40d4730-b9dc-4a4b-e42c-c191060ff66c"
1205 +   },
1206 +   "execution_count": null,
1207 +   "outputs": [
1208 +   {
1209 +   "output_type": "stream",
1210 +   "name": "stderr",
1211 +   "text": [
1212 +   "/usr/local/lib/python3.10/dist-packages/peft/tuners/lora/bnb.py:229: UserWarning: Merge lora module to 4-bit linear may get different generations due to rounding errors.\n",
1213 +   " warnings.warn(\n"
1214 +   ]
1215 +   }
1216 +   ]
1217 +   },
{
|
1219 |
"cell_type": "markdown",
|
1220 |
"source": [
|
1221 |
"And we're done! If you have any questions on Unsloth, we have a [Discord](https://discord.gg/u54VK8m8tk) channel! If you find any bugs or want to keep updated with the latest LLM stuff, or need help, join projects etc, feel free to join our Discord!\n",
|
1222 |
+
"\n",
|
1223 |
+
"Some other links:\n",
|
1224 |
+
"1. Zephyr DPO 2x faster [free Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing)\n",
|
1225 |
+
"2. Llama 7b 2x faster [free Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing)\n",
|
1226 |
+
"3. TinyLlama 4x faster full Alpaca 52K in 1 hour [free Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing)\n",
|
1227 |
+
"4. CodeLlama 34b 2x faster [A100 on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing)\n",
|
1228 |
+
"5. Llama 7b [free Kaggle](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp)\n",
|
1229 |
+
"6. We also did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗 HuggingFace, and we're in the TRL [docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth)!\n",
|
1230 |
+
"\n",
|
1231 |
"<div class=\"align-center\">\n",
|
1232 |
+
" <a href=\"https://github.com/unslothai/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png\" height=\"45\"></a>\n",
|
1233 |
+
" <a href=\"https://discord.gg/u54VK8m8tk\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Discord.png\" height=\"45\"></a>\n",
|
1234 |
+
" <a href=\"https://ko-fi.com/unsloth\"><img src=\"https://github.com/unslothai/unsloth/raw/main/images/Kofi button.png\" height=\"45\"></a></a> Support our work if you can!!\n",
|
1235 |
"</div>"
|
1236 |
],
|
1237 |
"metadata": {
|