diff --git "a/TIES_LoRa_Merge_Script.ipynb" "b/TIES_LoRa_Merge_Script.ipynb" new file mode 100644--- /dev/null +++ "b/TIES_LoRa_Merge_Script.ipynb" @@ -0,0 +1,5061 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Cast civitai trained LoRa in torch.bfloat16 to Tensor Art Compatible torch.float16 dtype\n", + "\n", + "Created by Adcom: https://tensor.art/u/743241123023077878" + ], + "metadata": { + "id": "YDCnQpDdqDe4" + } + }, + { + "cell_type": "code", + "source": [ + "#initialize\n", + "import torch\n", + "from safetensors.torch import load_file, save_file\n", + "from google.colab import drive\n", + "drive.mount('/content/drive')" + ], + "metadata": { + "id": "CBVTifA_ZwdC", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "aa3877d3-088e-423d-96b7-78befeab2734" + }, + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Mounted at /content/drive\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "import torch\n", + "from safetensors.torch import load_file, save_file\n", + "import torch.nn as nn\n", + "from torch import linalg as LA\n", + "import os\n", + "import math\n", + "import random\n", + "import numpy as np\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "\n", + "# filter_and_save\n", + "# Use this method to change the scale = (rank/alpha) value of a given _lora\n", + "# This method will also eliminate noise. All values < resolution * e-6 will be set to 0\n", + "# in the delta_W of this LoRa. The processed LoRa will be saved as a .safetensor file in fp16\n", + "# The rank of the LoRa affect the file size. At rank 32 the filesize is 300 MB , at rank 16 the filesize is 150 MB and so on.\n", + "#\n", + "# When merging LoRa , it is important that\n", + "# a) the scale of all the merged LoRas are the same. I use the scale = (alpha/rank) = 0.5 at all times.\n", + "# For rank 32 , the alpha must be 16 , for example.\n", + "#\n", + "# b) The rank of the merged LoRas should be 32 or below , any larger values might trigger a 'Out of Memory' error on Google Colab GPU:s\n", + "# --------------\n", + "# _lora - The lora which you wish to process\n", + "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", + "# new_rank - The rank you wish to set the LoRa to\n", + "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", + "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", + "# This is a very common scale for trained LoRa\n", + "#\n", + "# resolution - All values < resolution * e-6 will be set to 0\n", + "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", + "# Lora when scaling it to strength above 0.8. 
+ "\n", + "# count_zeros\n", + "# Use this method to gauge how large a resolution you should set for a given Lora.\n", + "# This function can serve as a 'preview' prior to running either the filter_and_save or\n", + "# merge_and_save methods. Since it does not use SVD to re-pack the LoRa\n", + "# , you can run this method on a non-GPU instance on Colab\n", + "#-----------\n", + "# _lora - The lora which you wish to process\n", + "# resolution - All values with magnitude < resolution * 1e-6 will be counted as zeros\n", + "def count_zeros(_lora, resolution):\n", + " count = 0\n", + " for key in _lora:count = count + 1\n", + " NUM_ITEMS = count\n", + " count = 0\n", + " #-----#\n", + " thresh = resolution*0.000001 # 1e-6\n", + "\n", + " print(f'at resolution = {resolution}e-6 :')\n", + " for key in _lora:\n", + " if f'{key}'.find('alpha') > -1:\n", + " count = count + 1\n", + " continue\n", + " #------#\n", + " if not f'{key}'.find('lora_down') > -1: continue\n", + " up = f'{key}'.replace('lora_down' , 'lora_up')\n", + " down = f'{key}'\n", + " #-------#\n", + " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", + " N = delta_W.numel()\n", + " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", + " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", + " y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n", + " y[indices[values>thresh]] = 0\n", + " neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n", + " y[indices[values<-thresh]] = 0\n", + " count = count + 2\n", + " pcnt = round((100*torch.sum(y) / N).item(),2)\n", + " neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n", + " pos_pcnt = round(100- pcnt - neg_pcnt,2)\n", + " print(f'at {count} / {NUM_ITEMS} : {pcnt} % zeros ,{pos_pcnt} % pos. , {neg_pcnt} % neg ')\n", + " #------#\n", + "#-----#\n",
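+ "\n", + "# A minimal usage sketch for count_zeros (hypothetical file name , not part of the original run):\n", + "# example = load_file('/content/drive/MyDrive/example_lora.safetensors')\n", + "# for key in example: example[key] = example[key].to(device = device , dtype = torch.float32)\n", + "# count_zeros(example , resolution = 200) # preview how much of each delta_W the threshold would zero out\n",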
+ "\n", + "# rescale_and_save\n", + "# This method rescales a _lora to a given (alpha/rank) ratio without changing its effective output. I haven't tested it\n", + "# but this is more or less how it works\n", + "def rescale_and_save(_lora , savefile_name, new_ratio):\n", + " count = 0\n", + " lora = {}\n", + " for key in _lora:count = count + 1\n", + " NUM_ITEMS = count\n", + " count = 0\n", + " decimals = 6\n", + " for key in _lora:\n", + " if not f'{key}'.find('alpha') > -1: continue\n", + " alpha = f'{key}'\n", + " up = f'{key}'.replace('alpha' , 'lora_up.weight')\n", + " down = f'{key}'.replace('alpha' , 'lora_down.weight')\n", + " #------#\n", + " rank = _lora[down].shape[0] # rank = inner dim of the lora_up @ lora_down factorization\n", + " new_alpha = torch.tensor(new_ratio*rank).to(device = device , dtype=torch.float32)\n", + " lora[up] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[up], decimals = decimals).to(device = device , dtype=torch.float32)\n", + " lora[down] = torch.round(torch.sqrt(_lora[alpha]/new_alpha)*_lora[down], decimals = decimals).to(device = device , dtype=torch.float32)\n", + " lora[alpha] = (new_alpha/_lora[alpha])*_lora[alpha].to(device = device , dtype=torch.float32)\n", + " count = count + 3\n", + " print(f'{count} / {NUM_ITEMS}')\n", + " #--------#\n", + " print(f'done!')\n", + " print(f'casting params to fp16....')\n", + " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", + " #-------#\n", + " print(f'done!')\n", + " print(f'saving {savefile_name}...')\n", + " save_file(lora , f'{savefile_name}')\n", + " #-----------#\n",
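+ "\n", + "# A minimal usage sketch for rescale_and_save (hypothetical file name , not part of the original run):\n", + "# bring a LoRa trained at (alpha/rank) = 1 down to the (alpha/rank) = 0.5 scale used for merging below\n", + "# example = load_file('/content/drive/MyDrive/example_lora.safetensors')\n", + "# for key in example: example[key] = example[key].to(device = device , dtype = torch.float32)\n", + "# rescale_and_save(example , 'example_rescaled.safetensors', new_ratio = 0.5)\n",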
+ "\n", + "# merge_and_save\n", + "# This method uses a general neural net merging method known as TIES - which is a loose abbreviation of\n", + "# 'Trim , Elect Sign & Merge' according to the paper : https://arxiv.org/pdf/2306.01708\n", + "#------------#\n", + "# _lora1 - The first lora you wish to merge.\n", + "# _lora2 - The second lora you wish to merge.\n", + "# _lora3 - The third lora you wish to merge.\n", + "\n", + "# NOTE about loras :\n", + "# _lora1 , _lora2 and _lora3 can have different ranks.\n", + "# Make sure the scale of all three loras is the same\n", + "\n", + "# The scale is defined as (alpha/rank) and should be 0.5\n", + "# If the alpha value is too high or too low , for example if (alpha/rank) = 1\n", + "# then run rescale_and_save(_lora , savefile_name, new_ratio) with new_ratio = 0.5\n", + "# For example , a LoRa of rank 32 must have an alpha value of 16 for the scale = (alpha/rank) = 0.5 to hold\n", + "\n", + "# However , make sure each lora rank is 32 or below ,\n", + "# or that the sum of ranks does not exceed 3*32 = 96 , so you stay within the GPU memory available on Google Colab. Slightly higher values might be fine.\n", + "# I haven't tested that , since I prefer merging LoRa at rank 32\n", + "\n", + "# savefile_name - The name of the savefile to be created. Make sure the savefile_name ends with the '.safetensors' suffix\n", + "# new_rank - The rank you wish to set the LoRa to\n", + "# new_alpha - The alpha value you wish to set the LoRa to. For proper scaling ,\n", + "# set the alpha value to half the value of the rank so (alpha/rank) = 0.5\n", + "# This is a very common scale for trained LoRa\n", + "#\n", + "# resolution - All values with magnitude < resolution * 1e-6 will be set to 0\n", + "# in the delta_W of this LoRa. This is useful to eliminate 'junk' in the output of the\n", + "# Lora when scaling it to strength above 0.8. A high resolution will also make the Lora more compatible with other LoRa\n", + "# , at the expense of making the LoRa less true to the originally trained image output.\n", + "def merge_and_save(_lora1 , _lora2 , _lora3, savefile_name, new_rank , new_alpha, resolution):\n", + " lora = {}\n", + " count = 0\n", + " for key in _lora1:count = count + 1\n", + " NUM_ITEMS = count\n", + " count = 0\n", + " thresh = resolution*0.000001 # 1e-6\n", + " decimals = 6\n", + "\n", + " #-------#\n", + " for key in _lora1:\n", + " if f'{key}'.find('alpha') > -1:\n", + " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", + " count = count + 1\n", + " print(f'{count} / {NUM_ITEMS}')\n", + " continue\n", + " #------#\n", + " if not f'{key}'.find('lora_down') > -1: continue\n", + " up = f'{key}'.replace('lora_down' , 'lora_up')\n", + " down = f'{key}'\n", + " #-------#\n", + "\n", + " # Setup\n", + " delta_W = torch.matmul(_lora1[up]*0,_lora1[down]*0).to(device = device, dtype=torch.float32)\n", + " tgt_shape = delta_W.shape\n", + " N = delta_W.numel()\n", + " delta_W = torch.zeros(N).to(device = device , dtype=torch.float32)\n", + " #-----#\n", + "\n", + " #Positives\n", + " Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n", + " Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[torch.abs(Y)<thresh] = 0\n", + " num = torch.sum(Y>0,dim=1) + 0.001\n", + " elect = torch.sum(Y<0,dim=1) + 0.001\n", + " elect = (num>=elect)\n", + " Y[Y<0] = 0\n", + " Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n", + " delta_W[elect] = torch.round((Y[elect]/num[elect]),decimals=decimals).to(device = device , dtype=torch.float32)\n", + " #-----#\n", + "\n", + " #Negatives\n", + " Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n", + " Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n", + " Y[torch.abs(Y)<thresh] = 0\n", + " num = torch.sum(Y<0,dim=1) + 0.001\n", + " elect = torch.sum(Y>0,dim=1) + 0.001\n", + " elect = (elect<num)\n", + " Y[Y>0] = 0\n", + " Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n", + " delta_W[elect] = torch.round(Y[elect]/num[elect],decimals=decimals).to(device = device , dtype=torch.float32)\n", + " #----#\n", + "\n", + " # Free up memory prior to SVD\n", + " delta_W = delta_W.unflatten(0,tgt_shape).to(device = device , dtype=torch.float32)\n", + " delta_W = delta_W.clone().detach()\n", + " Y = {}\n", + " num = {}\n", + " elect = {}\n", + " #-----#\n", + "\n", + " # Run SVD (Singular Value Decomposition)\n", + " #to get the new lora_up and lora_down for delta_W\n", + " tmp={}\n", + " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", + " tmp['u'] = tmp['u'][:,: new_rank]\n", + " tmp['s'] = tmp['s'][: new_rank]\n", + " tmp['u'] = torch.matmul(tmp['u'], torch.diag(tmp['s']))\n", + " tmp['Vh'] = tmp['Vh'].t()[: new_rank,:]\n", + " for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", + " lora[up] = torch.round(tmp['u'],decimals=decimals).to(device = device , dtype=torch.float32)\n", + " lora[down] = torch.round(tmp['Vh'],decimals=decimals).to(device = device , dtype=torch.float32)\n", + " #-------#\n", + "\n", + " count = count + 2\n", + " print(f'{count} / {NUM_ITEMS}')\n", + " #----#\n", + " #--------#\n", + " print(f'done!')\n", + " print(f'casting params to fp16....')\n", + " for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", + " #-------#\n", + " print(f'done!')\n", + " print(f'saving {savefile_name}...')\n", + " save_file(lora , f'{savefile_name}')\n", + "#------#\n",
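+ "\n", + "# Toy illustration of the TIES sign election above (not part of the original run):\n", + "# suppose one element of delta_W is 0.4 , -0.1 and 0.3 across the three LoRa.\n", + "# positives = 2 , negatives = 1 , so the positive sign is elected for this element and the\n", + "# merged value is the mean of the agreeing entries : (0.4 + 0.3) / 2 = 0.35\n",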
+ "\n", + "new_rank = 32\n", + "new_alpha = math.floor(new_rank/2)\n", + "resolution = 200\n", + "name = 'scale'\n", + "yeero = load_file('/content/drive/MyDrive/Saved from Chrome/scale.safetensors')\n", + "euro = load_file('/content/drive/MyDrive/Saved from Chrome/euro_100_r32_16alpha.safetensors')\n", + "puff = load_file('/content/drive/MyDrive/Saved from Chrome/buff_200_r32_16alpha.safetensors')\n", + "savefile_name = f'{name}_{resolution}_r{new_rank}_a{new_alpha}.safetensors'\n", + "\n", + "for key in yeero:\n", + " yeero[f'{key}'] = yeero[f'{key}'].to(device = device , dtype = torch.float32)\n", + " euro[f'{key}'] = euro[f'{key}'].to(device = device , dtype = torch.float32)\n", + " puff[f'{key}'] = puff[f'{key}'].to(device = device , dtype = torch.float32)\n", + "#-----#\n", + "print(f'for {name}.safetensors at scale = (rank/alpha) = 0.5')\n", + "#merge_and_save(yeero , euro , puff, savefile_name, new_rank , new_alpha, resolution)\n", + "\n", + "filter_and_save(yeero , savefile_name, new_rank , new_alpha, resolution)\n" + ], + "metadata": { + "id": "SKYzFxehkfG8", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "92d90a82-65e0-45ac-be1c-18d96ff95459" + }, + "execution_count": 6, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "for scale.safetensors at scale = (rank/alpha) = 0.5\n", + "1 / 912\n", + "3 / 912\n", + "4 / 912\n", + "6 / 912\n", + "7 / 912\n", + "9 / 912\n", + "10 / 912\n", + "12 / 912\n", + "13 / 912\n", + "15 / 912\n", + "16 / 912\n", + "18 / 912\n", + "19 / 912\n", + "21 / 912\n", + "22 / 912\n", + "24 / 912\n", + "25 / 912\n", + "27 
/ 912\n", + "28 / 912\n", + "30 / 912\n", + "31 / 912\n", + "33 / 912\n", + "34 / 912\n", + "36 / 912\n", + "37 / 912\n", + "39 / 912\n", + "40 / 912\n", + "42 / 912\n", + "43 / 912\n", + "45 / 912\n", + "46 / 912\n", + "48 / 912\n", + "49 / 912\n", + "51 / 912\n", + "52 / 912\n", + "54 / 912\n", + "55 / 912\n", + "57 / 912\n", + "58 / 912\n", + "60 / 912\n", + "61 / 912\n", + "63 / 912\n", + "64 / 912\n", + "66 / 912\n", + "67 / 912\n", + "69 / 912\n", + "70 / 912\n", + "72 / 912\n", + "73 / 912\n", + "75 / 912\n", + "76 / 912\n", + "78 / 912\n", + "79 / 912\n", + "81 / 912\n", + "82 / 912\n", + "84 / 912\n", + "85 / 912\n", + "87 / 912\n", + "88 / 912\n", + "90 / 912\n", + "91 / 912\n", + "93 / 912\n", + "94 / 912\n", + "96 / 912\n", + "97 / 912\n", + "99 / 912\n", + "100 / 912\n", + "102 / 912\n", + "103 / 912\n", + "105 / 912\n", + "106 / 912\n", + "108 / 912\n", + "109 / 912\n", + "111 / 912\n", + "112 / 912\n", + "114 / 912\n", + "115 / 912\n", + "117 / 912\n", + "118 / 912\n", + "120 / 912\n", + "121 / 912\n", + "123 / 912\n", + "124 / 912\n", + "126 / 912\n", + "127 / 912\n", + "129 / 912\n", + "130 / 912\n", + "132 / 912\n", + "133 / 912\n", + "135 / 912\n", + "136 / 912\n", + "138 / 912\n", + "139 / 912\n", + "141 / 912\n", + "142 / 912\n", + "144 / 912\n", + "145 / 912\n", + "147 / 912\n", + "148 / 912\n", + "150 / 912\n", + "151 / 912\n", + "153 / 912\n", + "154 / 912\n", + "156 / 912\n", + "157 / 912\n", + "159 / 912\n", + "160 / 912\n", + "162 / 912\n", + "163 / 912\n", + "165 / 912\n", + "166 / 912\n", + "168 / 912\n", + "169 / 912\n", + "171 / 912\n", + "172 / 912\n", + "174 / 912\n", + "175 / 912\n", + "177 / 912\n", + "178 / 912\n", + "180 / 912\n", + "181 / 912\n", + "183 / 912\n", + "184 / 912\n", + "186 / 912\n", + "187 / 912\n", + "189 / 912\n", + "190 / 912\n", + "192 / 912\n", + "193 / 912\n", + "195 / 912\n", + "196 / 912\n", + "198 / 912\n", + "199 / 912\n", + "201 / 912\n", + "202 / 912\n", + "204 / 912\n", + "205 / 912\n", + "207 / 912\n", + "208 / 912\n", + "210 / 912\n", + "211 / 912\n", + "213 / 912\n", + "214 / 912\n", + "216 / 912\n", + "217 / 912\n", + "219 / 912\n", + "220 / 912\n", + "222 / 912\n", + "223 / 912\n", + "225 / 912\n", + "226 / 912\n", + "228 / 912\n", + "229 / 912\n", + "231 / 912\n", + "232 / 912\n", + "234 / 912\n", + "235 / 912\n", + "237 / 912\n", + "238 / 912\n", + "240 / 912\n", + "241 / 912\n", + "243 / 912\n", + "244 / 912\n", + "246 / 912\n", + "247 / 912\n", + "249 / 912\n", + "250 / 912\n", + "252 / 912\n", + "253 / 912\n", + "255 / 912\n", + "256 / 912\n", + "258 / 912\n", + "259 / 912\n", + "261 / 912\n", + "262 / 912\n", + "264 / 912\n", + "265 / 912\n", + "267 / 912\n", + "268 / 912\n", + "270 / 912\n", + "271 / 912\n", + "273 / 912\n", + "274 / 912\n", + "276 / 912\n", + "277 / 912\n", + "279 / 912\n", + "280 / 912\n", + "282 / 912\n", + "283 / 912\n", + "285 / 912\n", + "286 / 912\n", + "288 / 912\n", + "289 / 912\n", + "291 / 912\n", + "292 / 912\n", + "294 / 912\n", + "295 / 912\n", + "297 / 912\n", + "298 / 912\n", + "300 / 912\n", + "301 / 912\n", + "303 / 912\n", + "304 / 912\n", + "306 / 912\n", + "307 / 912\n", + "309 / 912\n", + "310 / 912\n", + "312 / 912\n", + "313 / 912\n", + "315 / 912\n", + "316 / 912\n", + "318 / 912\n", + "319 / 912\n", + "321 / 912\n", + "322 / 912\n", + "324 / 912\n", + "325 / 912\n", + "327 / 912\n", + "328 / 912\n", + "330 / 912\n", + "331 / 912\n", + "333 / 912\n", + "334 / 912\n", + "336 / 912\n", + "337 / 912\n", + "339 / 912\n", + "340 / 912\n", + "342 / 912\n", + "343 / 912\n", + 
"345 / 912\n", + "346 / 912\n", + "348 / 912\n", + "349 / 912\n", + "351 / 912\n", + "352 / 912\n", + "354 / 912\n", + "355 / 912\n", + "357 / 912\n", + "358 / 912\n", + "360 / 912\n", + "361 / 912\n", + "363 / 912\n", + "364 / 912\n", + "366 / 912\n", + "367 / 912\n", + "369 / 912\n", + "370 / 912\n", + "372 / 912\n", + "373 / 912\n", + "375 / 912\n", + "376 / 912\n", + "378 / 912\n", + "379 / 912\n", + "381 / 912\n", + "382 / 912\n", + "384 / 912\n", + "385 / 912\n", + "387 / 912\n", + "388 / 912\n", + "390 / 912\n", + "391 / 912\n", + "393 / 912\n", + "394 / 912\n", + "396 / 912\n", + "397 / 912\n", + "399 / 912\n", + "400 / 912\n", + "402 / 912\n", + "403 / 912\n", + "405 / 912\n", + "406 / 912\n", + "408 / 912\n", + "409 / 912\n", + "411 / 912\n", + "412 / 912\n", + "414 / 912\n", + "415 / 912\n", + "417 / 912\n", + "418 / 912\n", + "420 / 912\n", + "421 / 912\n", + "423 / 912\n", + "424 / 912\n", + "426 / 912\n", + "427 / 912\n", + "429 / 912\n", + "430 / 912\n", + "432 / 912\n", + "433 / 912\n", + "435 / 912\n", + "436 / 912\n", + "438 / 912\n", + "439 / 912\n", + "441 / 912\n", + "442 / 912\n", + "444 / 912\n", + "445 / 912\n", + "447 / 912\n", + "448 / 912\n", + "450 / 912\n", + "451 / 912\n", + "453 / 912\n", + "454 / 912\n", + "456 / 912\n", + "457 / 912\n", + "459 / 912\n", + "460 / 912\n", + "462 / 912\n", + "463 / 912\n", + "465 / 912\n", + "466 / 912\n", + "468 / 912\n", + "469 / 912\n", + "471 / 912\n", + "472 / 912\n", + "474 / 912\n", + "475 / 912\n", + "477 / 912\n", + "478 / 912\n", + "480 / 912\n", + "481 / 912\n", + "483 / 912\n", + "484 / 912\n", + "486 / 912\n", + "487 / 912\n", + "489 / 912\n", + "490 / 912\n", + "492 / 912\n", + "493 / 912\n", + "495 / 912\n", + "496 / 912\n", + "498 / 912\n", + "499 / 912\n", + "501 / 912\n", + "502 / 912\n", + "504 / 912\n", + "505 / 912\n", + "507 / 912\n", + "508 / 912\n", + "510 / 912\n", + "511 / 912\n", + "513 / 912\n", + "514 / 912\n", + "516 / 912\n", + "517 / 912\n", + "519 / 912\n", + "520 / 912\n", + "522 / 912\n", + "523 / 912\n", + "525 / 912\n", + "526 / 912\n", + "528 / 912\n", + "529 / 912\n", + "531 / 912\n", + "532 / 912\n", + "534 / 912\n", + "535 / 912\n", + "537 / 912\n", + "538 / 912\n", + "540 / 912\n", + "541 / 912\n", + "543 / 912\n", + "544 / 912\n", + "546 / 912\n", + "547 / 912\n", + "549 / 912\n", + "550 / 912\n", + "552 / 912\n", + "553 / 912\n", + "555 / 912\n", + "556 / 912\n", + "558 / 912\n", + "559 / 912\n", + "561 / 912\n", + "562 / 912\n", + "564 / 912\n", + "565 / 912\n", + "567 / 912\n", + "568 / 912\n", + "570 / 912\n", + "571 / 912\n", + "573 / 912\n", + "574 / 912\n", + "576 / 912\n", + "577 / 912\n", + "579 / 912\n", + "580 / 912\n", + "582 / 912\n", + "583 / 912\n", + "585 / 912\n", + "586 / 912\n", + "588 / 912\n", + "589 / 912\n", + "591 / 912\n", + "592 / 912\n", + "594 / 912\n", + "595 / 912\n", + "597 / 912\n", + "598 / 912\n", + "600 / 912\n", + "601 / 912\n", + "603 / 912\n", + "604 / 912\n", + "606 / 912\n", + "607 / 912\n", + "609 / 912\n", + "610 / 912\n", + "612 / 912\n", + "613 / 912\n", + "615 / 912\n", + "616 / 912\n", + "618 / 912\n", + "619 / 912\n", + "621 / 912\n", + "622 / 912\n", + "624 / 912\n", + "625 / 912\n", + "627 / 912\n", + "628 / 912\n", + "630 / 912\n", + "631 / 912\n", + "633 / 912\n", + "634 / 912\n", + "636 / 912\n", + "637 / 912\n", + "639 / 912\n", + "640 / 912\n", + "642 / 912\n", + "643 / 912\n", + "645 / 912\n", + "646 / 912\n", + "648 / 912\n", + "649 / 912\n", + "651 / 912\n", + "652 / 912\n", + "654 / 912\n", + "655 / 912\n", + "657 / 912\n", + 
"658 / 912\n", + "660 / 912\n", + "661 / 912\n", + "663 / 912\n", + "664 / 912\n", + "666 / 912\n", + "667 / 912\n", + "669 / 912\n", + "670 / 912\n", + "672 / 912\n", + "673 / 912\n", + "675 / 912\n", + "676 / 912\n", + "678 / 912\n", + "679 / 912\n", + "681 / 912\n", + "682 / 912\n", + "684 / 912\n", + "685 / 912\n", + "687 / 912\n", + "688 / 912\n", + "690 / 912\n", + "691 / 912\n", + "693 / 912\n", + "694 / 912\n", + "696 / 912\n", + "697 / 912\n", + "699 / 912\n", + "700 / 912\n", + "702 / 912\n", + "703 / 912\n", + "705 / 912\n", + "706 / 912\n", + "708 / 912\n", + "709 / 912\n", + "711 / 912\n", + "712 / 912\n", + "714 / 912\n", + "715 / 912\n", + "717 / 912\n", + "718 / 912\n", + "720 / 912\n", + "721 / 912\n", + "723 / 912\n", + "724 / 912\n", + "726 / 912\n", + "727 / 912\n", + "729 / 912\n", + "730 / 912\n", + "732 / 912\n", + "733 / 912\n", + "735 / 912\n", + "736 / 912\n", + "738 / 912\n", + "739 / 912\n", + "741 / 912\n", + "742 / 912\n", + "744 / 912\n", + "745 / 912\n", + "747 / 912\n", + "748 / 912\n", + "750 / 912\n", + "751 / 912\n", + "753 / 912\n", + "754 / 912\n", + "756 / 912\n", + "757 / 912\n", + "759 / 912\n", + "760 / 912\n", + "762 / 912\n", + "763 / 912\n", + "765 / 912\n", + "766 / 912\n", + "768 / 912\n", + "769 / 912\n", + "771 / 912\n", + "772 / 912\n", + "774 / 912\n", + "775 / 912\n", + "777 / 912\n", + "778 / 912\n", + "780 / 912\n", + "781 / 912\n", + "783 / 912\n", + "784 / 912\n", + "786 / 912\n", + "787 / 912\n", + "789 / 912\n", + "790 / 912\n", + "792 / 912\n", + "793 / 912\n", + "795 / 912\n", + "796 / 912\n", + "798 / 912\n", + "799 / 912\n", + "801 / 912\n", + "802 / 912\n", + "804 / 912\n", + "805 / 912\n", + "807 / 912\n", + "808 / 912\n", + "810 / 912\n", + "811 / 912\n", + "813 / 912\n", + "814 / 912\n", + "816 / 912\n", + "817 / 912\n", + "819 / 912\n", + "820 / 912\n", + "822 / 912\n", + "823 / 912\n", + "825 / 912\n", + "826 / 912\n", + "828 / 912\n", + "829 / 912\n", + "831 / 912\n", + "832 / 912\n", + "834 / 912\n", + "835 / 912\n", + "837 / 912\n", + "838 / 912\n", + "840 / 912\n", + "841 / 912\n", + "843 / 912\n", + "844 / 912\n", + "846 / 912\n", + "847 / 912\n", + "849 / 912\n", + "850 / 912\n", + "852 / 912\n", + "853 / 912\n", + "855 / 912\n", + "856 / 912\n", + "858 / 912\n", + "859 / 912\n", + "861 / 912\n", + "862 / 912\n", + "864 / 912\n", + "865 / 912\n", + "867 / 912\n", + "868 / 912\n", + "870 / 912\n", + "871 / 912\n", + "873 / 912\n", + "874 / 912\n", + "876 / 912\n", + "877 / 912\n", + "879 / 912\n", + "880 / 912\n", + "882 / 912\n", + "883 / 912\n", + "885 / 912\n", + "886 / 912\n", + "888 / 912\n", + "889 / 912\n", + "891 / 912\n", + "892 / 912\n", + "894 / 912\n", + "895 / 912\n", + "897 / 912\n", + "898 / 912\n", + "900 / 912\n", + "901 / 912\n", + "903 / 912\n", + "904 / 912\n", + "906 / 912\n", + "907 / 912\n", + "909 / 912\n", + "910 / 912\n", + "912 / 912\n", + "done!\n", + "casting params to fp16....\n", + "done!\n", + "saving scale_200_r32_a16.safetensors...\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "new_rank = 32\n", + "new_alpha = math.floor(new_rank/2)\n", + "resolution = 200\n", + "name = 'star_euro_scale'\n", + "a = load_file('/content/drive/MyDrive/Saved from Chrome/star_100_r32_16alpha.safetensors')\n", + "b = load_file('/content/drive/MyDrive/Saved from Chrome/euro_100_r32_16alpha.safetensors')\n", + "c = load_file('/content/scale_200_r32_a16.safetensors')\n", + "savefile_name = f'{name}_{resolution}_r{new_rank}_a{new_alpha}.safetensors'\n", + "\n", + "#tgt = 
load_file(f'/kaggle/input/flux-loras/{name}_{resolution}_r32_16alpha.safetensors')\n", + "for key in yeero:\n", + " a[f'{key}'] = a[f'{key}'].to(device = device , dtype = torch.float32)\n", + " b[f'{key}'] = b[f'{key}'].to(device = device , dtype = torch.float32)\n", + " c[f'{key}'] = c[f'{key}'].to(device = device , dtype = torch.float32)\n", + "#-----#\n", + "print(f'for {name}.safetensors at scale = (rank/alpha) = 0.5')\n", + "merge_and_save(a,b,c, savefile_name, new_rank , new_alpha, resolution)" + ], + "metadata": { + "id": "l9RX4PLtqzkZ", + "outputId": "261ab4f5-972f-451e-a097-9ca9c14c9539", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "execution_count": 7, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "for star_euro_scale.safetensors at scale = (rank/alpha) = 0.5\n", + "1 / 912\n", + "3 / 912\n", + "4 / 912\n", + "6 / 912\n", + "7 / 912\n", + "9 / 912\n", + "10 / 912\n", + "12 / 912\n", + "13 / 912\n", + "15 / 912\n", + "16 / 912\n", + "18 / 912\n", + "19 / 912\n", + "21 / 912\n", + "22 / 912\n", + "24 / 912\n", + "25 / 912\n", + "27 / 912\n", + "28 / 912\n", + "30 / 912\n", + "31 / 912\n", + "33 / 912\n", + "34 / 912\n", + "36 / 912\n", + "37 / 912\n", + "39 / 912\n", + "40 / 912\n", + "42 / 912\n", + "43 / 912\n", + "45 / 912\n", + "46 / 912\n", + "48 / 912\n", + "49 / 912\n", + "51 / 912\n", + "52 / 912\n", + "54 / 912\n", + "55 / 912\n", + "57 / 912\n", + "58 / 912\n", + "60 / 912\n", + "61 / 912\n", + "63 / 912\n", + "64 / 912\n", + "66 / 912\n", + "67 / 912\n", + "69 / 912\n", + "70 / 912\n", + "72 / 912\n", + "73 / 912\n", + "75 / 912\n", + "76 / 912\n", + "78 / 912\n", + "79 / 912\n", + "81 / 912\n", + "82 / 912\n", + "84 / 912\n", + "85 / 912\n", + "87 / 912\n", + "88 / 912\n", + "90 / 912\n", + "91 / 912\n", + "93 / 912\n", + "94 / 912\n", + "96 / 912\n", + "97 / 912\n", + "99 / 912\n", + "100 / 912\n", + "102 / 912\n", + "103 / 912\n", + "105 / 912\n", + "106 / 912\n", + "108 / 912\n", + "109 / 912\n", + "111 / 912\n", + "112 / 912\n", + "114 / 912\n", + "115 / 912\n", + "117 / 912\n", + "118 / 912\n", + "120 / 912\n", + "121 / 912\n", + "123 / 912\n", + "124 / 912\n", + "126 / 912\n", + "127 / 912\n", + "129 / 912\n", + "130 / 912\n", + "132 / 912\n", + "133 / 912\n", + "135 / 912\n", + "136 / 912\n", + "138 / 912\n", + "139 / 912\n", + "141 / 912\n", + "142 / 912\n", + "144 / 912\n", + "145 / 912\n", + "147 / 912\n", + "148 / 912\n", + "150 / 912\n", + "151 / 912\n", + "153 / 912\n", + "154 / 912\n", + "156 / 912\n", + "157 / 912\n", + "159 / 912\n", + "160 / 912\n", + "162 / 912\n", + "163 / 912\n", + "165 / 912\n", + "166 / 912\n", + "168 / 912\n", + "169 / 912\n", + "171 / 912\n", + "172 / 912\n", + "174 / 912\n", + "175 / 912\n", + "177 / 912\n", + "178 / 912\n", + "180 / 912\n", + "181 / 912\n", + "183 / 912\n", + "184 / 912\n", + "186 / 912\n", + "187 / 912\n", + "189 / 912\n", + "190 / 912\n", + "192 / 912\n", + "193 / 912\n", + "195 / 912\n", + "196 / 912\n", + "198 / 912\n", + "199 / 912\n", + "201 / 912\n", + "202 / 912\n", + "204 / 912\n", + "205 / 912\n", + "207 / 912\n", + "208 / 912\n", + "210 / 912\n", + "211 / 912\n", + "213 / 912\n", + "214 / 912\n", + "216 / 912\n", + "217 / 912\n", + "219 / 912\n", + "220 / 912\n", + "222 / 912\n", + "223 / 912\n", + "225 / 912\n", + "226 / 912\n", + "228 / 912\n", + "229 / 912\n", + "231 / 912\n", + "232 / 912\n", + "234 / 912\n", + "235 / 912\n", + "237 / 912\n", + "238 / 912\n", + "240 / 912\n", + "241 / 912\n", + "243 / 912\n", + "244 / 912\n", + "246 / 912\n", 
+ "247 / 912\n", + "249 / 912\n", + "250 / 912\n", + "252 / 912\n", + "253 / 912\n", + "255 / 912\n", + "256 / 912\n", + "258 / 912\n", + "259 / 912\n", + "261 / 912\n", + "262 / 912\n", + "264 / 912\n", + "265 / 912\n", + "267 / 912\n", + "268 / 912\n", + "270 / 912\n", + "271 / 912\n", + "273 / 912\n", + "274 / 912\n", + "276 / 912\n", + "277 / 912\n", + "279 / 912\n", + "280 / 912\n", + "282 / 912\n", + "283 / 912\n", + "285 / 912\n", + "286 / 912\n", + "288 / 912\n", + "289 / 912\n", + "291 / 912\n", + "292 / 912\n", + "294 / 912\n", + "295 / 912\n", + "297 / 912\n", + "298 / 912\n", + "300 / 912\n", + "301 / 912\n", + "303 / 912\n", + "304 / 912\n", + "306 / 912\n", + "307 / 912\n", + "309 / 912\n", + "310 / 912\n", + "312 / 912\n", + "313 / 912\n", + "315 / 912\n", + "316 / 912\n", + "318 / 912\n", + "319 / 912\n", + "321 / 912\n", + "322 / 912\n", + "324 / 912\n", + "325 / 912\n", + "327 / 912\n", + "328 / 912\n", + "330 / 912\n", + "331 / 912\n", + "333 / 912\n", + "334 / 912\n", + "336 / 912\n", + "337 / 912\n", + "339 / 912\n", + "340 / 912\n", + "342 / 912\n", + "343 / 912\n", + "345 / 912\n", + "346 / 912\n", + "348 / 912\n", + "349 / 912\n", + "351 / 912\n", + "352 / 912\n", + "354 / 912\n", + "355 / 912\n", + "357 / 912\n", + "358 / 912\n", + "360 / 912\n", + "361 / 912\n", + "363 / 912\n", + "364 / 912\n", + "366 / 912\n", + "367 / 912\n", + "369 / 912\n", + "370 / 912\n", + "372 / 912\n", + "373 / 912\n", + "375 / 912\n", + "376 / 912\n", + "378 / 912\n", + "379 / 912\n", + "381 / 912\n", + "382 / 912\n", + "384 / 912\n", + "385 / 912\n", + "387 / 912\n", + "388 / 912\n", + "390 / 912\n", + "391 / 912\n", + "393 / 912\n", + "394 / 912\n", + "396 / 912\n", + "397 / 912\n", + "399 / 912\n", + "400 / 912\n", + "402 / 912\n", + "403 / 912\n", + "405 / 912\n", + "406 / 912\n", + "408 / 912\n", + "409 / 912\n", + "411 / 912\n", + "412 / 912\n", + "414 / 912\n", + "415 / 912\n", + "417 / 912\n", + "418 / 912\n", + "420 / 912\n", + "421 / 912\n", + "423 / 912\n", + "424 / 912\n", + "426 / 912\n", + "427 / 912\n", + "429 / 912\n", + "430 / 912\n", + "432 / 912\n", + "433 / 912\n", + "435 / 912\n", + "436 / 912\n", + "438 / 912\n", + "439 / 912\n", + "441 / 912\n", + "442 / 912\n", + "444 / 912\n", + "445 / 912\n", + "447 / 912\n", + "448 / 912\n", + "450 / 912\n", + "451 / 912\n", + "453 / 912\n", + "454 / 912\n", + "456 / 912\n", + "457 / 912\n", + "459 / 912\n", + "460 / 912\n", + "462 / 912\n", + "463 / 912\n", + "465 / 912\n", + "466 / 912\n", + "468 / 912\n", + "469 / 912\n", + "471 / 912\n", + "472 / 912\n", + "474 / 912\n", + "475 / 912\n", + "477 / 912\n", + "478 / 912\n", + "480 / 912\n", + "481 / 912\n", + "483 / 912\n", + "484 / 912\n", + "486 / 912\n", + "487 / 912\n", + "489 / 912\n", + "490 / 912\n", + "492 / 912\n", + "493 / 912\n", + "495 / 912\n", + "496 / 912\n", + "498 / 912\n", + "499 / 912\n", + "501 / 912\n", + "502 / 912\n", + "504 / 912\n", + "505 / 912\n", + "507 / 912\n", + "508 / 912\n", + "510 / 912\n", + "511 / 912\n", + "513 / 912\n", + "514 / 912\n", + "516 / 912\n", + "517 / 912\n", + "519 / 912\n", + "520 / 912\n", + "522 / 912\n", + "523 / 912\n", + "525 / 912\n", + "526 / 912\n", + "528 / 912\n", + "529 / 912\n", + "531 / 912\n", + "532 / 912\n", + "534 / 912\n", + "535 / 912\n", + "537 / 912\n", + "538 / 912\n", + "540 / 912\n", + "541 / 912\n", + "543 / 912\n", + "544 / 912\n", + "546 / 912\n", + "547 / 912\n", + "549 / 912\n", + "550 / 912\n", + "552 / 912\n", + "553 / 912\n", + "555 / 912\n", + "556 / 912\n", + "558 / 912\n", + "559 / 912\n", + 
"561 / 912\n", + "562 / 912\n", + "564 / 912\n", + "565 / 912\n", + "567 / 912\n", + "568 / 912\n", + "570 / 912\n", + "571 / 912\n", + "573 / 912\n", + "574 / 912\n", + "576 / 912\n", + "577 / 912\n", + "579 / 912\n", + "580 / 912\n", + "582 / 912\n", + "583 / 912\n", + "585 / 912\n", + "586 / 912\n", + "588 / 912\n", + "589 / 912\n", + "591 / 912\n", + "592 / 912\n", + "594 / 912\n", + "595 / 912\n", + "597 / 912\n", + "598 / 912\n", + "600 / 912\n", + "601 / 912\n", + "603 / 912\n", + "604 / 912\n", + "606 / 912\n", + "607 / 912\n", + "609 / 912\n", + "610 / 912\n", + "612 / 912\n", + "613 / 912\n", + "615 / 912\n", + "616 / 912\n", + "618 / 912\n", + "619 / 912\n", + "621 / 912\n", + "622 / 912\n", + "624 / 912\n", + "625 / 912\n", + "627 / 912\n", + "628 / 912\n", + "630 / 912\n", + "631 / 912\n", + "633 / 912\n", + "634 / 912\n", + "636 / 912\n", + "637 / 912\n", + "639 / 912\n", + "640 / 912\n", + "642 / 912\n", + "643 / 912\n", + "645 / 912\n", + "646 / 912\n", + "648 / 912\n", + "649 / 912\n", + "651 / 912\n", + "652 / 912\n", + "654 / 912\n", + "655 / 912\n", + "657 / 912\n", + "658 / 912\n", + "660 / 912\n", + "661 / 912\n", + "663 / 912\n", + "664 / 912\n", + "666 / 912\n", + "667 / 912\n", + "669 / 912\n", + "670 / 912\n", + "672 / 912\n", + "673 / 912\n", + "675 / 912\n", + "676 / 912\n", + "678 / 912\n", + "679 / 912\n", + "681 / 912\n", + "682 / 912\n", + "684 / 912\n", + "685 / 912\n", + "687 / 912\n", + "688 / 912\n", + "690 / 912\n", + "691 / 912\n", + "693 / 912\n", + "694 / 912\n", + "696 / 912\n", + "697 / 912\n", + "699 / 912\n", + "700 / 912\n", + "702 / 912\n", + "703 / 912\n", + "705 / 912\n", + "706 / 912\n", + "708 / 912\n", + "709 / 912\n", + "711 / 912\n", + "712 / 912\n", + "714 / 912\n", + "715 / 912\n", + "717 / 912\n", + "718 / 912\n", + "720 / 912\n", + "721 / 912\n", + "723 / 912\n", + "724 / 912\n", + "726 / 912\n", + "727 / 912\n", + "729 / 912\n", + "730 / 912\n", + "732 / 912\n", + "733 / 912\n", + "735 / 912\n", + "736 / 912\n", + "738 / 912\n", + "739 / 912\n", + "741 / 912\n", + "742 / 912\n", + "744 / 912\n", + "745 / 912\n", + "747 / 912\n", + "748 / 912\n", + "750 / 912\n", + "751 / 912\n", + "753 / 912\n", + "754 / 912\n", + "756 / 912\n", + "757 / 912\n", + "759 / 912\n", + "760 / 912\n", + "762 / 912\n", + "763 / 912\n", + "765 / 912\n", + "766 / 912\n", + "768 / 912\n", + "769 / 912\n", + "771 / 912\n", + "772 / 912\n", + "774 / 912\n", + "775 / 912\n", + "777 / 912\n", + "778 / 912\n", + "780 / 912\n", + "781 / 912\n", + "783 / 912\n", + "784 / 912\n", + "786 / 912\n", + "787 / 912\n", + "789 / 912\n", + "790 / 912\n", + "792 / 912\n", + "793 / 912\n", + "795 / 912\n", + "796 / 912\n", + "798 / 912\n", + "799 / 912\n", + "801 / 912\n", + "802 / 912\n", + "804 / 912\n", + "805 / 912\n", + "807 / 912\n", + "808 / 912\n", + "810 / 912\n", + "811 / 912\n", + "813 / 912\n", + "814 / 912\n", + "816 / 912\n", + "817 / 912\n", + "819 / 912\n", + "820 / 912\n", + "822 / 912\n", + "823 / 912\n", + "825 / 912\n", + "826 / 912\n", + "828 / 912\n", + "829 / 912\n", + "831 / 912\n", + "832 / 912\n", + "834 / 912\n", + "835 / 912\n", + "837 / 912\n", + "838 / 912\n", + "840 / 912\n", + "841 / 912\n", + "843 / 912\n", + "844 / 912\n", + "846 / 912\n", + "847 / 912\n", + "849 / 912\n", + "850 / 912\n", + "852 / 912\n", + "853 / 912\n", + "855 / 912\n", + "856 / 912\n", + "858 / 912\n", + "859 / 912\n", + "861 / 912\n", + "862 / 912\n", + "864 / 912\n", + "865 / 912\n", + "867 / 912\n", + "868 / 912\n", + "870 / 912\n", + "871 / 912\n", + "873 / 912\n", + 
"874 / 912\n", + "876 / 912\n", + "877 / 912\n", + "879 / 912\n", + "880 / 912\n", + "882 / 912\n", + "883 / 912\n", + "885 / 912\n", + "886 / 912\n", + "888 / 912\n", + "889 / 912\n", + "891 / 912\n", + "892 / 912\n", + "894 / 912\n", + "895 / 912\n", + "897 / 912\n", + "898 / 912\n", + "900 / 912\n", + "901 / 912\n", + "903 / 912\n", + "904 / 912\n", + "906 / 912\n", + "907 / 912\n", + "909 / 912\n", + "910 / 912\n", + "912 / 912\n", + "done!\n", + "casting params to fp16....\n", + "done!\n", + "saving star_euro_scale_200_r32_a16.safetensors...\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from safetensors.torch import load_file, save_file\n", + "_puff = load_file('/content/drive/MyDrive/Saved from Chrome/pfbkFLUX.safetensors')\n", + "puff = {}\n", + "\n", + "#alpha = 64\n", + "#rank = 64\n", + "\n", + "# = > so scale = 1\n", + "#desired scale = 0.5\n", + "# so multiply matrices by 2 and set alpha to 32\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "for key in _puff:\n", + " if f'{key}'.find('alpha')>-1:\n", + " puff[f'{key}'] = torch.tensor(32).to(device=device , dtype = torch.float16)\n", + " #print(puff[f'{key}'])\n", + " continue\n", + " puff[f'{key}'] = 2*_puff[f'{key}'].to(device=device , dtype = torch.float16)\n", + "\n", + " #print(puff[f'{key}'].shape)\n", + "\n", + "save_file(puff, 'puff.safetensors')" + ], + "metadata": { + "id": "U8fCk78GimS8" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from safetensors.torch import load_file, save_file\n", + "_tongue = load_file('/content/drive/MyDrive/Saved from Chrome/tongue-flux-v2.1.safetensors')\n", + "tongue = {}\n", + "# Scale = 32/16 = 2\n", + "# Desired scale = 0.5 => multiply all matrices by 4 and set alpha to 8\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "for key in _tongue:\n", + " if f'{key}'.find('alpha')>-1:\n", + " tongue[f'{key}'] = torch.tensor(8).to(device=device , dtype = torch.float16)\n", + " continue\n", + " #-------#\n", + " tongue[f'{key}'] = 4*_tongue[f'{key}'].to(device=device , dtype = torch.float16)\n", + "#-------#\n", + "save_file(tongue, 'tongue.safetensors')" + ], + "metadata": { + "id": "lFNa6vgrgdSA" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "\n", + "\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "\n", + "_oily = load_file('/content/drive/MyDrive/Saved from Chrome/OiledSkin_FluxDev.safetensors')\n", + "\n", + "star = load_file('/content/drive/MyDrive/Saved from Chrome/star_100_r32_16alpha.safetensors')\n", + "#A = vs , B = u\n", + "#lora_down = A , lora_up = B\n", + "\n", + "oily = {}\n", + "for key in _oily:\n", + " if not f'{key}'.find('_A.')>-1:continue\n", + " A = f'{key}'\n", + " B = f'{key}'.replace('_A.','_B.')\n", + " down = f'{key}'.replace('_A.','_down.')\n", + " up = f'{key}'.replace('_A.','_up.')\n", + " #-----#\n", + " oily[f'{up}'] = _oily[f'{B}'].to(device = device , dtype=torch.float16)\n", + " oily[f'{down}'] = _oily[f'{A}'].to(device = device , dtype=torch.float16)\n", + " #------#\n", + " if not f'{key}'.find('to_k.')>-1:continue\n", + " k = f'{key}'\n", + " q = k.replace('to_k.','to_q.')\n", + " v = k.replace('to_k.','to_v.')\n", + "\n", + "print(\"---------OILY---------\")\n", + "for key in oily:\n", + " print(key)\n", + " #if f'{key}'.find('alpha')>-1:print(key)\n", + "\n", + "print(\"---------STAR---------\")\n", + "for key in star:\n", + " break\n", 
+ " print(key)" + ], + "metadata": { + "id": "1oxeJYHRqxQC", + "collapsed": true, + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "12e3a407-f9d1-403e-949b-31330be59577" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "---------OILY---------\n", + "transformer.single_transformer_blocks.0.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.0.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.0.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.0.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.0.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.0.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.0.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.0.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.0.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.0.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.0.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.0.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.1.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.1.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.1.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.1.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.1.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.1.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.1.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.1.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.1.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.1.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.1.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.1.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.10.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.10.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.10.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.10.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.10.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.10.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.10.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.10.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.10.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.10.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.10.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.10.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.11.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.11.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.11.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.11.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.11.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.11.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.11.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.11.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.11.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.11.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.11.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.11.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.12.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.12.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.12.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.12.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.12.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.12.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.12.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.12.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.12.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.12.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.12.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.12.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.13.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.13.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.13.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.13.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.13.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.13.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.13.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.13.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.13.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.13.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.13.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.13.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.14.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.14.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.14.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.14.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.14.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.14.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.14.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.14.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.14.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.14.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.14.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.14.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.15.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.15.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.15.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.15.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.15.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.15.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.15.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.15.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.15.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.15.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.15.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.15.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.16.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.16.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.16.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.16.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.16.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.16.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.16.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.16.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.16.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.16.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.16.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.16.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.17.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.17.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.17.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.17.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.17.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.17.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.17.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.17.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.17.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.17.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.17.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.17.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.18.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.18.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.18.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.18.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.18.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.18.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.18.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.18.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.18.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.18.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.18.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.18.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.19.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.19.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.19.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.19.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.19.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.19.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.19.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.19.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.19.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.19.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.19.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.19.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.2.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.2.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.2.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.2.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.2.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.2.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.2.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.2.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.2.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.2.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.2.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.2.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.20.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.20.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.20.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.20.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.20.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.20.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.20.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.20.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.20.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.20.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.20.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.20.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.21.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.21.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.21.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.21.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.21.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.21.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.21.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.21.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.21.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.21.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.21.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.21.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.22.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.22.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.22.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.22.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.22.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.22.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.22.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.22.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.22.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.22.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.22.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.22.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.23.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.23.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.23.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.23.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.23.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.23.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.23.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.23.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.23.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.23.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.23.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.23.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.24.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.24.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.24.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.24.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.24.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.24.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.24.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.24.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.24.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.24.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.24.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.24.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.25.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.25.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.25.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.25.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.25.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.25.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.25.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.25.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.25.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.25.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.25.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.25.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.26.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.26.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.26.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.26.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.26.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.26.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.26.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.26.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.26.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.26.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.26.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.26.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.27.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.27.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.27.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.27.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.27.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.27.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.27.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.27.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.27.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.27.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.27.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.27.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.28.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.28.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.28.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.28.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.28.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.28.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.28.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.28.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.28.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.28.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.28.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.28.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.29.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.29.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.29.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.29.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.29.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.29.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.29.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.29.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.29.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.29.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.29.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.29.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.3.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.3.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.3.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.3.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.3.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.3.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.3.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.3.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.3.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.3.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.3.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.3.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.30.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.30.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.30.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.30.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.30.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.30.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.30.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.30.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.30.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.30.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.30.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.30.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.31.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.31.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.31.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.31.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.31.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.31.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.31.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.31.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.31.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.31.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.31.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.31.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.32.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.32.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.32.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.32.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.32.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.32.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.32.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.32.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.32.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.32.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.32.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.32.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.33.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.33.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.33.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.33.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.33.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.33.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.33.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.33.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.33.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.33.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.33.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.33.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.34.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.34.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.34.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.34.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.34.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.34.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.34.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.34.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.34.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.34.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.34.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.34.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.35.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.35.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.35.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.35.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.35.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.35.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.35.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.35.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.35.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.35.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.35.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.35.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.36.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.36.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.36.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.36.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.36.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.36.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.36.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.36.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.36.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.36.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.36.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.36.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.37.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.37.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.37.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.37.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.37.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.37.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.37.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.37.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.37.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.37.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.37.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.37.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.4.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.4.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.4.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.4.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.4.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.4.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.4.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.4.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.4.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.4.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.4.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.4.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.5.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.5.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.5.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.5.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.5.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.5.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.5.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.5.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.5.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.5.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.5.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.5.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.6.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.6.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.6.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.6.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.6.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.6.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.6.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.6.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.6.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.6.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.6.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.6.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.7.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.7.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.7.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.7.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.7.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.7.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.7.norm.linear.lora_up.weight\n", + 
"transformer.single_transformer_blocks.7.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.7.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.7.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.7.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.7.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.8.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.8.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.8.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.8.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.8.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.8.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.8.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.8.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.8.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.8.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.8.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.8.proj_out.lora_down.weight\n", + "transformer.single_transformer_blocks.9.attn.to_k.lora_up.weight\n", + "transformer.single_transformer_blocks.9.attn.to_k.lora_down.weight\n", + "transformer.single_transformer_blocks.9.attn.to_q.lora_up.weight\n", + "transformer.single_transformer_blocks.9.attn.to_q.lora_down.weight\n", + "transformer.single_transformer_blocks.9.attn.to_v.lora_up.weight\n", + "transformer.single_transformer_blocks.9.attn.to_v.lora_down.weight\n", + "transformer.single_transformer_blocks.9.norm.linear.lora_up.weight\n", + "transformer.single_transformer_blocks.9.norm.linear.lora_down.weight\n", + "transformer.single_transformer_blocks.9.proj_mlp.lora_up.weight\n", + "transformer.single_transformer_blocks.9.proj_mlp.lora_down.weight\n", + "transformer.single_transformer_blocks.9.proj_out.lora_up.weight\n", + "transformer.single_transformer_blocks.9.proj_out.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.0.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.0.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.0.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.0.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.0.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.0.ff.net.2.lora_down.weight\n", + 
"transformer.transformer_blocks.0.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.0.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.0.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.0.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.0.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.0.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.0.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.0.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.1.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.1.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.1.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.1.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.1.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.1.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.1.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.1.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.1.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.1.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.1.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.1.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.1.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.1.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.to_q.lora_up.weight\n", + 
"transformer.transformer_blocks.10.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.10.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.10.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.10.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.10.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.10.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.10.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.10.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.10.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.10.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.10.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.10.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.10.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.10.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.10.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.11.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.11.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.11.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.11.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.11.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.11.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.11.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.11.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.11.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.11.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.11.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.11.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.11.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.11.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.add_v_proj.lora_down.weight\n", + 
"transformer.transformer_blocks.12.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.12.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.12.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.12.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.12.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.12.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.12.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.12.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.12.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.12.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.12.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.12.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.12.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.12.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.12.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.13.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.13.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.13.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.13.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.13.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.13.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.13.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.13.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.13.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.13.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.13.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.13.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.13.norm1_context.linear.lora_up.weight\n", + 
"transformer.transformer_blocks.13.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.14.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.14.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.14.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.14.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.14.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.14.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.14.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.14.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.14.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.14.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.14.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.14.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.14.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.14.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.15.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.15.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.15.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.15.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.15.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.15.ff.net.2.lora_down.weight\n", + 
"transformer.transformer_blocks.15.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.15.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.15.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.15.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.15.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.15.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.15.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.15.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.16.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.16.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.16.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.16.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.16.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.16.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.16.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.16.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.16.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.16.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.16.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.16.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.16.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.16.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.to_q.lora_up.weight\n", + 
"transformer.transformer_blocks.17.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.17.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.17.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.17.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.17.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.17.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.17.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.17.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.17.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.17.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.17.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.17.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.17.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.17.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.17.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.18.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.18.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.18.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.18.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.18.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.18.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.18.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.18.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.18.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.18.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.18.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.18.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.18.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.18.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.add_v_proj.lora_down.weight\n", + 
"transformer.transformer_blocks.2.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.2.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.2.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.2.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.2.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.2.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.2.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.2.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.2.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.2.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.2.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.2.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.2.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.2.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.2.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.3.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.3.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.3.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.3.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.3.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.3.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.3.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.3.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.3.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.3.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.3.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.3.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.3.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.3.norm1_context.linear.lora_down.weight\n", + 
"transformer.transformer_blocks.4.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.4.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.4.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.4.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.4.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.4.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.4.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.4.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.4.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.4.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.4.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.4.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.4.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.4.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.4.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.5.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.5.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.5.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.5.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.5.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.5.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.5.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.5.ff_context.net.0.proj.lora_down.weight\n", + 
"transformer.transformer_blocks.5.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.5.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.5.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.5.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.5.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.5.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.6.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.6.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.6.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.6.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.6.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.6.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.6.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.6.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.6.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.6.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.6.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.6.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.6.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.6.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.7.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.7.attn.to_v.lora_down.weight\n", + 
"transformer.transformer_blocks.7.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.7.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.7.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.7.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.7.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.7.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.7.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.7.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.7.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.7.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.7.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.7.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.to_k.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.8.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.8.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.8.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.8.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.8.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.8.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.8.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.8.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.8.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.8.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.8.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.8.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.8.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.8.norm1_context.linear.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.add_k_proj.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.add_k_proj.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.add_q_proj.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.add_q_proj.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.add_v_proj.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.add_v_proj.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.to_add_out.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.to_add_out.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.to_k.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.to_k.lora_down.weight\n", + 
"transformer.transformer_blocks.9.attn.to_out.0.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.to_out.0.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.to_q.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.to_q.lora_down.weight\n", + "transformer.transformer_blocks.9.attn.to_v.lora_up.weight\n", + "transformer.transformer_blocks.9.attn.to_v.lora_down.weight\n", + "transformer.transformer_blocks.9.ff.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.9.ff.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.9.ff.net.2.lora_up.weight\n", + "transformer.transformer_blocks.9.ff.net.2.lora_down.weight\n", + "transformer.transformer_blocks.9.ff_context.net.0.proj.lora_up.weight\n", + "transformer.transformer_blocks.9.ff_context.net.0.proj.lora_down.weight\n", + "transformer.transformer_blocks.9.ff_context.net.2.lora_up.weight\n", + "transformer.transformer_blocks.9.ff_context.net.2.lora_down.weight\n", + "transformer.transformer_blocks.9.norm1.linear.lora_up.weight\n", + "transformer.transformer_blocks.9.norm1.linear.lora_down.weight\n", + "transformer.transformer_blocks.9.norm1_context.linear.lora_up.weight\n", + "transformer.transformer_blocks.9.norm1_context.linear.lora_down.weight\n", + "---------STAR---------\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "down = 'lora_unet_double_blocks_0_img_attn_qkv.lora_down.weight'\n", + "up = 'lora_unet_double_blocks_0_img_attn_qkv.lora_up.weight'\n", + "tgt = star\n", + "print(\"STAR\")\n", + "print(tgt[f'{up}'].shape)\n", + "#print(torch.matmul(tgt[f'{up}'],tgt[f'{down}']).shape)\n", + "\n", + "down = 'transformer.transformer_blocks.0.attn.to_k.lora_down.weight'\n", + "up = 'transformer.transformer_blocks.0.attn.to_k.lora_up.weight'\n", + "tgt = oily\n", + "print(\"VS. OILY\")\n", + "print(tgt[f'{up}'].shape)\n", + "#print(torch.matmul(tgt[f'{up}'],tgt[f'{down}']).shape)\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "GoDfgENYaWD7", + "outputId": "9336ae1a-6244-4e76-f291-82cda4482831" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "STAR\n", + "torch.Size([9216, 32])\n", + "VS. 
OILY\n", + "torch.Size([3072, 32])\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "for key in oily:\n", + " print(oily[f'{key}'].shape)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "xQhVLouEfmGE", + "outputId": "662176b3-480d-48eb-f5db-97ec71b5e970" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([12288, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 15360])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([3072, 32])\n", + "torch.Size([32, 3072])\n", + "torch.Size([9216, 32])\n", + "torch.Size([32, 3072])\n", 
+            "torch.Size([32, 3072])\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "\n",
+        "import torch\n",
+        "from safetensors.torch import load_file, save_file\n",
+        "import torch.nn as nn\n",
+        "from torch import linalg as LA\n",
+        "import os\n",
+        "import math\n",
+        "import random\n",
+        "import numpy as np\n",
+        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+        "\n",
+        "# _filter\n",
+        "# For pcnt = 30, this helper keeps the top 30% values and the lowest (most negative) 30% values\n",
+        "# of a tensor and zeroes out the rest. It belongs to the older percentile-based variant of\n",
+        "# 'filter_and_save' (see the commented-out calls at the bottom of this cell).\n",
+        "def _filter(tgt , pcnt, largest):\n",
+        "  num_topk = math.floor(tgt.numel()*(pcnt/100))\n",
+        "  y = tgt.flatten().to(device = device , dtype=torch.float32)\n",
+        "  values,indices = torch.topk( y , num_topk , largest=largest)\n",
+        "  _values,_indices = torch.topk( -y , num_topk , largest=largest)\n",
+        "  y = y*0\n",
+        "  y[indices] = 1\n",
+        "  y[_indices] = 1\n",
+        "  y = y.unflatten(0,tgt.shape).to(device = device , dtype=torch.float32)\n",
+        "  return torch.mul(tgt,y)\n",
+        "\n",
+        "#----#\n",
+        "\n",
+        "# filter_and_save\n",
+        "# Zeroes out all delta_W values with magnitude below resolution * 1e-6, then rebuilds the LoRA\n",
+        "# at new_rank / new_alpha via SVD and saves it as a fp16 .safetensors file.\n",
+        "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, resolution):\n",
+        "  lora = {}\n",
+        "  count = 0\n",
+        "  for key in _lora:count = count + 1\n",
+        "  NUM_ITEMS = count\n",
+        "  count = 0\n",
+        "  thresh = resolution*0.000001 # 1e-6\n",
+        "  #-------#\n",
+        "  for key in _lora:\n",
+        "    if f'{key}'.find('alpha') > -1:\n",
+        "      lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n",
+        "      count = count + 1\n",
+        "      print(f'{count} / {NUM_ITEMS}')\n",
+        "      continue\n",
+        "    #------#\n",
+        "    if not f'{key}'.find('lora_down') > -1: continue\n",
+        "    up = 
f'{key}'.replace('lora_down' , 'lora_up')\n", + " down = f'{key}'\n", + " #-------#\n", + " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", + " #---#\n", + " N = delta_W.numel()\n", + " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", + " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", + " y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n", + " y[indices[values>thresh]] = 1\n", + " y[indices[values<-thresh]] = 1\n", + " y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n", + " delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n", + " #------#\n", + " tmp={}\n", + " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", + " tmp['u'] = tmp['u'][:,: new_rank]\n", + " tmp['s'] = tmp['s'][: new_rank]\n", + " #-------#\n", + " tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n", + " tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n", + " #-------#\n", + " for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", + " lora[up] = tmp['u'].to(device = device , dtype=torch.float32)\n", + " lora[down] = tmp['Vh'].to(device = device , dtype=torch.float32)\n", + " #-------#\n", + " count = count + 2\n", + " print(f'{count} / {NUM_ITEMS}')\n", + " #-------#\n", + " print(f'done!')\n", + " print(f'casting params to fp16....')\n", + " for key in _lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", + " #-------#\n", + " print(f'done!')\n", + " print(f'saving {savefile_name}...')\n", + " save_file(lora , f'{savefile_name}')\n", + "#--------#\n", + "\n", + "\n", + "new_rank = 32\n", + "new_alpha = new_rank/2\n", + "resolution = 100\n", + "star = load_file('/kaggle/input/flux-loras/yeero.safetensors')\n", + "for key in star:\n", + " star[f'{key}'] = star[f'{key}'].to(device = device , dtype = torch.float32)\n", + "\n", + "filter_and_save(star , f'yeero_{resolution}_r{new_rank}_{new_alpha}alpha.safetensors' , new_rank , new_alpha, resolution)\n", + "\n", + "#pcnt = 30\n", + "#new_rank = 6\n", + "#filter_and_save(yeero , f'yeero_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n", + "#filter_and_save(euro , f'euro_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n", + "#filter_and_save(star , f'star_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n" + ], + "metadata": { + "id": "f46xbSVkUlDl" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi_style.safetensors')" + ], + "metadata": { + "id": "JuGDCX5272Bh" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "#cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi_style.safetensors')\n", + "doll = load_file('/content/drive/MyDrive/Saved from Chrome/dolls.safetensors')\n", + "euro = load_file('/content/drive/MyDrive/Saved from Chrome/euro.safetensors')\n", + "scale = load_file('/content/drive/MyDrive/Saved from Chrome/scale.safetensors')\n", + "cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi.safetensors')\n", + "guns = load_file('/content/drive/MyDrive/Saved from Chrome/guns.safetensors')\n", + "iris = load_file('/content/drive/MyDrive/Saved from Chrome/iris.safetensors')" + ], + "metadata": { + "id": "FftDdBRG7su6" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "for key in 
doll:\n", + " doll[f'{key}'] = doll[f'{key}'].to(dtype=torch.float16)\n", + " euro[f'{key}'] = euro[f'{key}'].to(dtype=torch.float16)\n", + " scale[f'{key}'] = scale[f'{key}'].to(dtype=torch.float16)" + ], + "metadata": { + "id": "RII9SEqh8KH2" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "#define metric for similarity\n", + "tgt_dim = torch.Size([64, 3072])\n", + "cos0 = nn.CosineSimilarity(dim=1)\n", + "cos = nn.CosineSimilarity(dim=1)\n", + "\n", + "\n", + "def sim(tgt , ref ,key):\n", + " return torch.sum(torch.abs(cos(tgt, ref[f'{key}']))) + torch.sum(torch.abs(cos0(tgt, ref[f'{key}'])))\n", + "#-----#\n", + "\n", + "from torch import linalg as LA\n", + "\n", + "LA.matrix_norm\n", + "def rand_search(A , B , key , iters):\n", + " tgt_norm = (LA.matrix_norm(A[f'{key}']) + LA.matrix_norm(B[f'{key}']))/2\n", + " tgt_avg = (A[f'{key}'] + B[f'{key}'])/2\n", + "\n", + " max_sim = (sim(tgt_avg , A , key) + sim(tgt_avg , B , key))\n", + " cand = tgt_avg\n", + "\n", + " for iter in range(iters):\n", + " rand = torch.ones(tgt_dim)*(-0.5) + torch.rand(tgt_dim)\n", + " rand = rand * (tgt_norm/LA.matrix_norm(rand))\n", + " #rand = (rand + tgt_avg)/2\n", + " #rand = rand * (tgt_norm/LA.matrix_norm(rand))\n", + "\n", + " tmp = sim(rand,A, key) + sim(rand , B, key)\n", + " if (tmp > max_sim):\n", + " max_sim = tmp\n", + " cand = rand\n", + " print('found!')\n", + " break\n", + " #------#\n", + " print('returning')\n", + " return cand , max_sim\n", + "#-----#" + ], + "metadata": { + "id": "hJL6QEclHdHn" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "cand , max_sim = rand_search(cgi , iris , 'lora_unet_double_blocks_0_img_attn_proj.lora_down.weight' , 1000)\n", + "print(sim(cand , iris , key))\n", + "print(sim(cand , cgi , key))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ckyBSQi5Ll4F", + "outputId": "341f7192-083d-4423-f61f-4f49d5756e79" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "returning\n", + "tensor(91.1875, dtype=torch.float16)\n", + "tensor(90.2500, dtype=torch.float16)\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "(torch.rand(1).to(dtype=torch.float16)*3).item()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XLwslN61hiIJ", + "outputId": "9e3cbba6-3727-4772-f453-fecf8a408790" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "0.2138671875" + ] + }, + "metadata": {}, + "execution_count": 16 + } + ] + }, + { + "cell_type": "code", + "source": [ + "torch.rand(1).to(dtype=torch.float16)*10" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "AKwh0lZ1f8dJ", + "outputId": "59186526-bd73-4efe-925a-3e7a9c738e53" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([6.8555], dtype=torch.float16)" + ] + }, + "metadata": {}, + "execution_count": 13 + } + ] + }, + { + "cell_type": "code", + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "#define metric for similarity\n", + "tgt_dim = torch.Size([64, 3072])\n", + "cos0 = nn.CosineSimilarity(dim=0)\n", + "\n", + "\n", + "\n", + "cos = nn.CosineSimilarity(dim=1)\n", + "\n", + "\n", + "def sim(tgt , ref ,key):\n", + " return torch.sum(torch.abs(cos(tgt, 
ref[f'{key}']))) + torch.sum(torch.abs(cos0(tgt, ref[f'{key}'])))\n", + "#-----#" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "SNCvvkb2h3Zb", + "outputId": "725fabd1-3fe2-4ac2-f24c-5f9309d45e4a" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "7.715576171875" + ] + }, + "metadata": {}, + "execution_count": 37 + } + ] + }, + { + "cell_type": "code", + "source": [ + "from safetensors.torch import load_file , save_file\n", + "import torch\n", + "import torch.nn as nn\n", + "from torch import linalg as LA\n", + "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", + "#define metric for similarity\n", + "cos0 = nn.CosineSimilarity(dim=0).to(device)\n", + "final_score = 0\n", + "highest_score = 0\n", + "w_cgi = 1\n", + "w_doll = 2\n", + "w_euro = 2\n", + "w_guns = 1\n", + "w_iris = 2\n", + "w_scale = 1\n", + "\n", + "w_noise = 0.00001 * (w_cgi + w_doll + w_euro + w_guns + w_iris + w_scale)\n", + "fixed_noise = {}\n", + "\n", + "#for key in doll:\n", + "# fixed_noise[f'{key}'] = torch.zeros(doll[f'{key}'].shape).to(device = device , dtype=torch.float16)\n", + "#------#\n", + "#w_offset = 0* (w1+w2+w3)\n", + "#_w_offset = 0\n", + "\n", + "W = (w_cgi + w_doll + w_euro + w_guns + w_iris + w_scale + w_noise)*torch.ones(1).to(device = device,dtype=torch.float16)\n", + "\n", + "SCALE = 0.0001\n", + "one = torch.ones(1).to(dtype=torch.float16).to(device)\n", + "\n", + "for attempt in range(1000):\n", + " print(f'attempt no : {attempt+1} ')\n", + " merge = load_file('/content/drive/MyDrive/Saved from Chrome/dolls.safetensors')\n", + " for key in doll:\n", + " tgt_dim = doll[f'{key}'].shape\n", + " if tgt_dim == torch.Size([]): continue\n", + " r_cgi = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_cgi\n", + " r_doll = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_doll\n", + " r_euro = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_euro\n", + " r_guns = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_guns\n", + " r_iris = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_iris\n", + " r_scale = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_scale\n", + " #------#\n", + " noise = torch.rand(tgt_dim).to(device = device,dtype=torch.float16)\n", + " noise_norm = LA.matrix_norm(noise).to(device = device,dtype=torch.float16).item()\n", + " noise = (w_noise/noise_norm)*noise.to(device = device,dtype=torch.float16)\n", + " #-----#\n", + " merge[f'{key}'] = r_cgi * cgi[f'{key}'] #overwrite\n", + " merge[f'{key}'] = merge[f'{key}'] + r_doll * doll[f'{key}']\n", + " merge[f'{key}'] = merge[f'{key}'] + r_euro * euro[f'{key}']\n", + " merge[f'{key}'] = merge[f'{key}'] + r_guns * guns[f'{key}']\n", + " merge[f'{key}'] = merge[f'{key}'] + r_iris * iris[f'{key}']\n", + " merge[f'{key}'] = merge[f'{key}'] + r_scale * scale[f'{key}']\n", + " merge[f'{key}'] = ((merge[f'{key}'] + noise)/W).to(device = device,dtype=torch.float16)\n", + " #-------#\n", + " score = torch.zeros(1).to(device = device, dtype=torch.float32)\n", + " #----#\n", + " NUM_ITERS = 10\n", + " for iter in range(NUM_ITERS):\n", + " for key in doll:\n", + " tgt_dim = doll[f'{key}'].shape\n", + " if tgt_dim == torch.Size([]): continue\n", + " vec = torch.rand(tgt_dim[0]).to(device = device,dtype=torch.float16)\n", + " cgi_out = torch.matmul(vec , cgi[f'{key}']).to(device = device,dtype=torch.float16)\n", + " doll_out = torch.matmul(vec 
, doll[f'{key}']).to(device = device,dtype=torch.float16)\n", + " euro_out = torch.matmul(vec , euro[f'{key}']).to(device = device,dtype=torch.float16)\n", + " guns_out = torch.matmul(vec , guns[f'{key}']).to(device = device,dtype=torch.float16)\n", + " iris_out = torch.matmul(vec , iris[f'{key}']).to(device = device,dtype=torch.float16)\n", + " scale_out = torch.matmul(vec , scale[f'{key}']).to(device = device,dtype=torch.float16)\n", + " merge_out = torch.matmul(vec , merge[f'{key}']).to(device = device,dtype=torch.float16)\n", + " #-------#\n", + " sim_value_cgi = torch.abs(cos0(cgi_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " sim_value_doll = torch.abs(cos0(doll_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " sim_value_euro = torch.abs(cos0(euro_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " sim_value_guns = torch.abs(cos0(guns_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " sim_value_iris = torch.abs(cos0(iris_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " sim_value_scale = torch.abs(cos0(scale_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", + " score = score + SCALE*(sim_value_cgi + 2*sim_value_doll + 2*sim_value_euro + sim_value_guns + 2*sim_value_iris + sim_value_scale)/9 #<--- This score can be anything at all\n", + " #----#\n", + " #-----#\n", + "\n", + " final_score = (1000/(NUM_ITERS * SCALE))*score.to(device = 'cpu' , dtype=torch.float32).item()\n", + " if (final_score>highest_score) :\n", + " highest_score = final_score\n", + " print('new highscore!')\n", + " print(f'score : {final_score} pts')\n", + " #------#\n", + " save_file(merge , 'all_merge_R4.safetensors')\n", + " #------#\n", + "\n", + "print(f'------------')\n", + "print(f'Final score : {highest_score} pts')\n", + "\n", + "\n", + "#all R1 23.190992578747682\n", + "\n", + "#all R2 23.333244826062582\n", + "\n", + "#all R3 23.34471355425194\n", + "\n", + "#all R4 23.402637452818453" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "9L_g5Zp9Du2E", + "outputId": "a3aa2bde-061e-43f5-ca35-96bdc470be80" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "attempt no : 1 \n", + "new highscore!\n", + "score : 23.264414267032407 pts\n", + "attempt no : 2 \n", + "attempt no : 3 \n", + "attempt no : 4 \n", + "new highscore!\n", + "score : 23.29399467271287 pts\n", + "attempt no : 5 \n", + "attempt no : 6 \n", + "attempt no : 7 \n", + "attempt no : 8 \n", + "attempt no : 9 \n", + "attempt no : 10 \n", + "attempt no : 11 \n", + "new highscore!\n", + "score : 23.362628780887462 pts\n", + "attempt no : 12 \n", + "attempt no : 13 \n", + "attempt no : 14 \n", + "attempt no : 15 \n", + "attempt no : 16 \n", + "attempt no : 17 \n", + "attempt no : 18 \n", + "attempt no : 19 \n", + "attempt no : 20 \n", + "attempt no : 21 \n", + "attempt no : 22 \n", + "attempt no : 23 \n", + "new highscore!\n", + "score : 23.37011210329365 pts\n", + "attempt no : 24 \n", + "attempt no : 25 \n", + "attempt no : 26 \n", + "attempt no : 27 \n", + "attempt no : 28 \n", + "attempt no : 29 \n", + "attempt no : 30 \n", + "attempt no : 31 \n", + "attempt no : 32 \n", + "attempt no : 33 \n", + "attempt no : 34 \n", + "new highscore!\n", + "score : 23.402637452818453 pts\n", + "attempt no : 35 \n", + "attempt no : 36 \n", + "attempt no : 37 \n", + "attempt no : 38 \n", + "attempt no : 39 \n", + "attempt no 
: 40 \n", + "attempt no : 41 \n", + "attempt no : 42 \n", + "attempt no : 43 \n", + "attempt no : 44 \n", + "attempt no : 45 \n", + "attempt no : 46 \n", + "attempt no : 47 \n", + "attempt no : 48 \n", + "attempt no : 49 \n", + "attempt no : 50 \n", + "attempt no : 51 \n", + "attempt no : 52 \n", + "attempt no : 53 \n", + "attempt no : 54 \n", + "attempt no : 55 \n", + "attempt no : 56 \n", + "attempt no : 57 \n", + "attempt no : 58 \n", + "attempt no : 59 \n", + "attempt no : 60 \n", + "attempt no : 61 \n", + "attempt no : 62 \n", + "attempt no : 63 \n", + "attempt no : 64 \n", + "attempt no : 65 \n", + "attempt no : 66 \n", + "attempt no : 67 \n", + "attempt no : 68 \n", + "attempt no : 69 \n", + "attempt no : 70 \n", + "attempt no : 71 \n", + "attempt no : 72 \n", + "attempt no : 73 \n", + "attempt no : 74 \n", + "attempt no : 75 \n", + "attempt no : 76 \n", + "attempt no : 77 \n", + "attempt no : 78 \n", + "attempt no : 79 \n", + "attempt no : 80 \n", + "attempt no : 81 \n", + "attempt no : 82 \n", + "attempt no : 83 \n", + "attempt no : 84 \n", + "attempt no : 85 \n", + "attempt no : 86 \n" + ] + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mtgt_dim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdoll\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34mf'{key}'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtgt_dim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0mvec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrand\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtgt_dim\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0mcgi_out\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmatmul\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvec\u001b[0m \u001b[0;34m,\u001b[0m \u001b[0mcgi\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34mf'{key}'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mdoll_out\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmatmul\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvec\u001b[0m \u001b[0;34m,\u001b[0m \u001b[0mdoll\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34mf'{key}'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + " for key in doll:\n", + " if final_score<38.5: break\n", + " _w_offset = w_offset\n", + " W = (w1+w2+w3 + w_noise + _w_offset)*torch.ones(1).to(device = device,dtype=torch.float16)\n", + " tgt_dim = doll[f'{key}'].shape\n", + " if tgt_dim == torch.Size([]): continue\n", + " fixed_noise[f'{key}'] = fixed_noise[f'{key}'] + merge[f'{key}']\n", + " fixed_noise[f'{key}'] = (fixed_noise[f'{key}'] * (w_offset*torch.ones(1).to(device = device,dtype=torch.float16)/LA.matrix_norm(fixed_noise[f'{key}']))).to(device = device,dtype=torch.float16)" + ], + "metadata": { + "id": "jWFHMJN6TqDq" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + " vec = torch.rand(tgt_dim[0]).to(dtype=torch.float16)\n", + " same = torch.abs(cos0(vec ,vec))" + ], + "metadata": { + "id": "k7Pq-kDbuNnQ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "same" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ANBPfP7tuOoa", + "outputId": "24300487-f874-4f1b-beb7-0f441ec7df4a" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor(1., dtype=torch.float16)" + ] + }, + "metadata": {}, + "execution_count": 65 + } + ] + }, + { + "cell_type": "code", + "source": [ + "torch.ones(1).to(dtype=torch.float16)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zN92j8JJuQ6G", + "outputId": "b810f4e6-a8f3-426a-ae52-ffbd44fb3f00" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([1.], dtype=torch.float16)" + ] + }, + "metadata": {}, + "execution_count": 66 + } + ] + }, + { + "cell_type": "code", + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "py-JMJzhsAI4", + "outputId": "207cd809-031c-48e3-af0a-98bc114d910e" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "score : 45.8125 pts\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "%cd /content/\n", + "save_file(merge , 'doll_euro_scale_R_merge.safetensors')" + ], + "metadata": { + "id": "7qogsYsAr2QU" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "9wzLwurSpwpL" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "test = torch.rand(tgt_dim)\n", + "vec = torch.rand(tgt_dim[0])" + ], + "metadata": { + "id": "DHdy4DptowYG" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "tgt_dim[0]" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + 
"id": "WeNJ0bquphtx", + "outputId": "442bfb2e-c1ab-4549-a4ea-ca80d3cc9a7d" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "9216" + ] + }, + "metadata": {}, + "execution_count": 46 + } + ] + }, + { + "cell_type": "code", + "source": [ + "(torch.matmul(vec,test)).shape" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "xqZp3Xo8pQuW", + "outputId": "68e5c25e-3391-45e7-9c73-45e0174ddbc1" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "torch.Size([64])" + ] + }, + "metadata": {}, + "execution_count": 48 + } + ] + }, + { + "cell_type": "code", + "source": [ + "tgt_dim = torch.Size([64, 3072])\n", + "cosa = nn.CosineSimilarity(dim=0)\n", + "cos_dim1 = nn.CosineSimilarity(dim=1)\n", + "\n", + "for key in cgi:\n", + " if not cgi[f'{key}'].shape == torch.Size([64, 3072]): continue\n", + " print(f'{key} : ')\n", + " print(torch.sum(torch.abs(cos_dim1(cgi[f'{key}'] , iris[f'{key}']))))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VFNw0Nck8V6Q", + "outputId": "e48bab98-18f7-43bb-d1cf-89f3e00f7ccf" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "lora_unet_double_blocks_0_img_attn_proj.lora_down.weight : \n", + "tensor(1.6982, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_img_attn_qkv.lora_down.weight : \n", + "tensor(1.8145, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_img_mlp_0.lora_down.weight : \n", + "tensor(1.6309, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_img_mod_lin.lora_down.weight : \n", + "tensor(2.6211, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_txt_attn_proj.lora_down.weight : \n", + "tensor(2.3203, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.3027, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_txt_mlp_0.lora_down.weight : \n", + "tensor(2.5898, dtype=torch.float16)\n", + "lora_unet_double_blocks_0_txt_mod_lin.lora_down.weight : \n", + "tensor(2.7402, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_img_attn_proj.lora_down.weight : \n", + "tensor(2.0410, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_img_attn_qkv.lora_down.weight : \n", + "tensor(1.3350, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_img_mlp_0.lora_down.weight : \n", + "tensor(2.0020, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_img_mod_lin.lora_down.weight : \n", + "tensor(2.6562, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_txt_attn_proj.lora_down.weight : \n", + "tensor(1.1816, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.1348, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_txt_mlp_0.lora_down.weight : \n", + "tensor(3.0156, dtype=torch.float16)\n", + "lora_unet_double_blocks_10_txt_mod_lin.lora_down.weight : \n", + "tensor(1.4746, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_img_attn_proj.lora_down.weight : \n", + "tensor(1.8359, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_img_attn_qkv.lora_down.weight : \n", + "tensor(1.5312, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_img_mlp_0.lora_down.weight : \n", + "tensor(2.1465, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_img_mod_lin.lora_down.weight : \n", + "tensor(3.9277, dtype=torch.float16)\n", + 
"lora_unet_double_blocks_11_txt_attn_proj.lora_down.weight : \n", + "tensor(1.7246, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.8594, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_txt_mlp_0.lora_down.weight : \n", + "tensor(3.6465, dtype=torch.float16)\n", + "lora_unet_double_blocks_11_txt_mod_lin.lora_down.weight : \n", + "tensor(2.6152, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_img_attn_proj.lora_down.weight : \n", + "tensor(1.7295, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4795, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_img_mlp_0.lora_down.weight : \n", + "tensor(3.4043, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_img_mod_lin.lora_down.weight : \n", + "tensor(2.0137, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_txt_attn_proj.lora_down.weight : \n", + "tensor(1.4375, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.8994, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_txt_mlp_0.lora_down.weight : \n", + "tensor(2.1152, dtype=torch.float16)\n", + "lora_unet_double_blocks_12_txt_mod_lin.lora_down.weight : \n", + "tensor(1.2744, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_img_attn_proj.lora_down.weight : \n", + "tensor(3.0742, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4980, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_img_mlp_0.lora_down.weight : \n", + "tensor(1.9609, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_img_mod_lin.lora_down.weight : \n", + "tensor(2.6133, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_txt_attn_proj.lora_down.weight : \n", + "tensor(1.6904, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.1680, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_txt_mlp_0.lora_down.weight : \n", + "tensor(2.8574, dtype=torch.float16)\n", + "lora_unet_double_blocks_13_txt_mod_lin.lora_down.weight : \n", + "tensor(1.9053, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_img_attn_proj.lora_down.weight : \n", + "tensor(1.8135, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4033, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_img_mlp_0.lora_down.weight : \n", + "tensor(1.5547, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_img_mod_lin.lora_down.weight : \n", + "tensor(2.8906, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_txt_attn_proj.lora_down.weight : \n", + "tensor(1.1328, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.3701, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_txt_mlp_0.lora_down.weight : \n", + "tensor(3.3145, dtype=torch.float16)\n", + "lora_unet_double_blocks_14_txt_mod_lin.lora_down.weight : \n", + "tensor(1.2031, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_img_attn_proj.lora_down.weight : \n", + "tensor(1.5137, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_img_attn_qkv.lora_down.weight : \n", + "tensor(1.3809, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_img_mlp_0.lora_down.weight : \n", + "tensor(1.4834, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_img_mod_lin.lora_down.weight : \n", + "tensor(1.6465, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_txt_attn_proj.lora_down.weight 
: \n", + "tensor(1.7256, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.8672, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_txt_mlp_0.lora_down.weight : \n", + "tensor(2.1953, dtype=torch.float16)\n", + "lora_unet_double_blocks_15_txt_mod_lin.lora_down.weight : \n", + "tensor(0.9858, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_img_attn_proj.lora_down.weight : \n", + "tensor(1.5703, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4648, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_img_mlp_0.lora_down.weight : \n", + "tensor(1.5537, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_img_mod_lin.lora_down.weight : \n", + "tensor(2.6133, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_txt_attn_proj.lora_down.weight : \n", + "tensor(2.2559, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.9365, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_txt_mlp_0.lora_down.weight : \n", + "tensor(2.7891, dtype=torch.float16)\n", + "lora_unet_double_blocks_16_txt_mod_lin.lora_down.weight : \n", + "tensor(1.3174, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_img_attn_proj.lora_down.weight : \n", + "tensor(2.4609, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_img_attn_qkv.lora_down.weight : \n", + "tensor(1.6240, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_img_mlp_0.lora_down.weight : \n", + "tensor(3.1406, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_img_mod_lin.lora_down.weight : \n", + "tensor(2.1055, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_txt_attn_proj.lora_down.weight : \n", + "tensor(1.7480, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.6436, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_txt_mlp_0.lora_down.weight : \n", + "tensor(1.9688, dtype=torch.float16)\n", + "lora_unet_double_blocks_17_txt_mod_lin.lora_down.weight : \n", + "tensor(1.8184, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_img_attn_proj.lora_down.weight : \n", + "tensor(2.3887, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_img_attn_qkv.lora_down.weight : \n", + "tensor(1.6738, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_img_mlp_0.lora_down.weight : \n", + "tensor(3.7500, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_img_mod_lin.lora_down.weight : \n", + "tensor(2.7285, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_txt_attn_proj.lora_down.weight : \n", + "tensor(2.0410, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.0586, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_txt_mlp_0.lora_down.weight : \n", + "tensor(2.0801, dtype=torch.float16)\n", + "lora_unet_double_blocks_18_txt_mod_lin.lora_down.weight : \n", + "tensor(1.5684, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_img_attn_proj.lora_down.weight : \n", + "tensor(4.9844, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_img_attn_qkv.lora_down.weight : \n", + "tensor(1.8613, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_img_mlp_0.lora_down.weight : \n", + "tensor(2.2266, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_img_mod_lin.lora_down.weight : \n", + "tensor(2.8164, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_txt_attn_proj.lora_down.weight : \n", + "tensor(1.7500, dtype=torch.float16)\n", + 
"lora_unet_double_blocks_1_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.3105, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_txt_mlp_0.lora_down.weight : \n", + "tensor(1.9639, dtype=torch.float16)\n", + "lora_unet_double_blocks_1_txt_mod_lin.lora_down.weight : \n", + "tensor(2.6504, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_img_attn_proj.lora_down.weight : \n", + "tensor(4.6367, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_img_attn_qkv.lora_down.weight : \n", + "tensor(1.7988, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_img_mlp_0.lora_down.weight : \n", + "tensor(4.6758, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_img_mod_lin.lora_down.weight : \n", + "tensor(3.1445, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_txt_attn_proj.lora_down.weight : \n", + "tensor(2.2285, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.4990, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_txt_mlp_0.lora_down.weight : \n", + "tensor(2.3984, dtype=torch.float16)\n", + "lora_unet_double_blocks_2_txt_mod_lin.lora_down.weight : \n", + "tensor(1.4443, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_img_attn_proj.lora_down.weight : \n", + "tensor(3.6855, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_img_attn_qkv.lora_down.weight : \n", + "tensor(1.9971, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_img_mlp_0.lora_down.weight : \n", + "tensor(3.3301, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_img_mod_lin.lora_down.weight : \n", + "tensor(2.3379, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_txt_attn_proj.lora_down.weight : \n", + "tensor(2.0117, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.1621, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_txt_mlp_0.lora_down.weight : \n", + "tensor(2.7676, dtype=torch.float16)\n", + "lora_unet_double_blocks_3_txt_mod_lin.lora_down.weight : \n", + "tensor(3.1895, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_img_attn_proj.lora_down.weight : \n", + "tensor(2.3848, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_img_attn_qkv.lora_down.weight : \n", + "tensor(1.7783, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_img_mlp_0.lora_down.weight : \n", + "tensor(2.0234, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_img_mod_lin.lora_down.weight : \n", + "tensor(1.9082, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_txt_attn_proj.lora_down.weight : \n", + "tensor(1.7588, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.9902, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_txt_mlp_0.lora_down.weight : \n", + "tensor(1.5859, dtype=torch.float16)\n", + "lora_unet_double_blocks_4_txt_mod_lin.lora_down.weight : \n", + "tensor(1.5654, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_img_attn_proj.lora_down.weight : \n", + "tensor(2.7402, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_img_attn_qkv.lora_down.weight : \n", + "tensor(1.6221, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_img_mlp_0.lora_down.weight : \n", + "tensor(1.6318, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_img_mod_lin.lora_down.weight : \n", + "tensor(1.7988, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_txt_attn_proj.lora_down.weight : \n", + "tensor(1.1699, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_txt_attn_qkv.lora_down.weight : \n", + "tensor(3.5566, 
dtype=torch.float16)\n", + "lora_unet_double_blocks_5_txt_mlp_0.lora_down.weight : \n", + "tensor(1.5791, dtype=torch.float16)\n", + "lora_unet_double_blocks_5_txt_mod_lin.lora_down.weight : \n", + "tensor(1.5547, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_img_attn_proj.lora_down.weight : \n", + "tensor(1.7988, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4531, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_img_mlp_0.lora_down.weight : \n", + "tensor(2.4141, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_img_mod_lin.lora_down.weight : \n", + "tensor(6.0234, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_txt_attn_proj.lora_down.weight : \n", + "tensor(1.0068, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.0098, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_txt_mlp_0.lora_down.weight : \n", + "tensor(4.0312, dtype=torch.float16)\n", + "lora_unet_double_blocks_6_txt_mod_lin.lora_down.weight : \n", + "tensor(2.6309, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_img_attn_proj.lora_down.weight : \n", + "tensor(1.4814, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4854, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_img_mlp_0.lora_down.weight : \n", + "tensor(1.3877, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_img_mod_lin.lora_down.weight : \n", + "tensor(2.3125, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_txt_attn_proj.lora_down.weight : \n", + "tensor(3.4746, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.0430, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_txt_mlp_0.lora_down.weight : \n", + "tensor(1.8018, dtype=torch.float16)\n", + "lora_unet_double_blocks_7_txt_mod_lin.lora_down.weight : \n", + "tensor(1.1709, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_img_attn_proj.lora_down.weight : \n", + "tensor(1.8857, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_img_attn_qkv.lora_down.weight : \n", + "tensor(1.8848, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_img_mlp_0.lora_down.weight : \n", + "tensor(1.7627, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_img_mod_lin.lora_down.weight : \n", + "tensor(4.2852, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_txt_attn_proj.lora_down.weight : \n", + "tensor(1.3887, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_txt_attn_qkv.lora_down.weight : \n", + "tensor(1.6289, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_txt_mlp_0.lora_down.weight : \n", + "tensor(2.2188, dtype=torch.float16)\n", + "lora_unet_double_blocks_8_txt_mod_lin.lora_down.weight : \n", + "tensor(1.5742, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_img_attn_proj.lora_down.weight : \n", + "tensor(2.3125, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_img_attn_qkv.lora_down.weight : \n", + "tensor(1.4854, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_img_mlp_0.lora_down.weight : \n", + "tensor(1.9492, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_img_mod_lin.lora_down.weight : \n", + "tensor(2.2949, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_txt_attn_proj.lora_down.weight : \n", + "tensor(2.0781, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_txt_attn_qkv.lora_down.weight : \n", + "tensor(2.6172, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_txt_mlp_0.lora_down.weight : \n", + 
"tensor(3.1367, dtype=torch.float16)\n", + "lora_unet_double_blocks_9_txt_mod_lin.lora_down.weight : \n", + "tensor(1.2451, dtype=torch.float16)\n", + "lora_unet_single_blocks_0_linear1.lora_down.weight : \n", + "tensor(2.4375, dtype=torch.float16)\n", + "lora_unet_single_blocks_0_modulation_lin.lora_down.weight : \n", + "tensor(3.5684, dtype=torch.float16)\n", + "lora_unet_single_blocks_10_linear1.lora_down.weight : \n", + "tensor(2.6328, dtype=torch.float16)\n", + "lora_unet_single_blocks_10_modulation_lin.lora_down.weight : \n", + "tensor(2.9961, dtype=torch.float16)\n", + "lora_unet_single_blocks_11_linear1.lora_down.weight : \n", + "tensor(3.1211, dtype=torch.float16)\n", + "lora_unet_single_blocks_11_modulation_lin.lora_down.weight : \n", + "tensor(3.3672, dtype=torch.float16)\n", + "lora_unet_single_blocks_12_linear1.lora_down.weight : \n", + "tensor(3.0293, dtype=torch.float16)\n", + "lora_unet_single_blocks_12_modulation_lin.lora_down.weight : \n", + "tensor(3.6602, dtype=torch.float16)\n", + "lora_unet_single_blocks_13_linear1.lora_down.weight : \n", + "tensor(2.5918, dtype=torch.float16)\n", + "lora_unet_single_blocks_13_modulation_lin.lora_down.weight : \n", + "tensor(4.6367, dtype=torch.float16)\n", + "lora_unet_single_blocks_14_linear1.lora_down.weight : \n", + "tensor(2.0215, dtype=torch.float16)\n", + "lora_unet_single_blocks_14_modulation_lin.lora_down.weight : \n", + "tensor(3.5371, dtype=torch.float16)\n", + "lora_unet_single_blocks_15_linear1.lora_down.weight : \n", + "tensor(2.1719, dtype=torch.float16)\n", + "lora_unet_single_blocks_15_modulation_lin.lora_down.weight : \n", + "tensor(4.2812, dtype=torch.float16)\n", + "lora_unet_single_blocks_16_linear1.lora_down.weight : \n", + "tensor(2.1992, dtype=torch.float16)\n", + "lora_unet_single_blocks_16_modulation_lin.lora_down.weight : \n", + "tensor(4.1094, dtype=torch.float16)\n", + "lora_unet_single_blocks_17_linear1.lora_down.weight : \n", + "tensor(2.0703, dtype=torch.float16)\n", + "lora_unet_single_blocks_17_modulation_lin.lora_down.weight : \n", + "tensor(2.9277, dtype=torch.float16)\n", + "lora_unet_single_blocks_18_linear1.lora_down.weight : \n", + "tensor(2.0371, dtype=torch.float16)\n", + "lora_unet_single_blocks_18_modulation_lin.lora_down.weight : \n", + "tensor(2.6133, dtype=torch.float16)\n", + "lora_unet_single_blocks_19_linear1.lora_down.weight : \n", + "tensor(2.0723, dtype=torch.float16)\n", + "lora_unet_single_blocks_19_modulation_lin.lora_down.weight : \n", + "tensor(3.4980, dtype=torch.float16)\n", + "lora_unet_single_blocks_1_linear1.lora_down.weight : \n", + "tensor(1.7432, dtype=torch.float16)\n", + "lora_unet_single_blocks_1_modulation_lin.lora_down.weight : \n", + "tensor(2.3848, dtype=torch.float16)\n", + "lora_unet_single_blocks_20_linear1.lora_down.weight : \n", + "tensor(2.0137, dtype=torch.float16)\n", + "lora_unet_single_blocks_20_modulation_lin.lora_down.weight : \n", + "tensor(2.8203, dtype=torch.float16)\n", + "lora_unet_single_blocks_21_linear1.lora_down.weight : \n", + "tensor(1.8955, dtype=torch.float16)\n", + "lora_unet_single_blocks_21_modulation_lin.lora_down.weight : \n", + "tensor(2.7305, dtype=torch.float16)\n", + "lora_unet_single_blocks_22_linear1.lora_down.weight : \n", + "tensor(2.7559, dtype=torch.float16)\n", + "lora_unet_single_blocks_22_modulation_lin.lora_down.weight : \n", + "tensor(4.6133, dtype=torch.float16)\n", + "lora_unet_single_blocks_23_linear1.lora_down.weight : \n", + "tensor(2.5508, dtype=torch.float16)\n", + 
"lora_unet_single_blocks_23_modulation_lin.lora_down.weight : \n", + "tensor(4.4180, dtype=torch.float16)\n", + "lora_unet_single_blocks_24_linear1.lora_down.weight : \n", + "tensor(1.9219, dtype=torch.float16)\n", + "lora_unet_single_blocks_24_modulation_lin.lora_down.weight : \n", + "tensor(2.9453, dtype=torch.float16)\n", + "lora_unet_single_blocks_25_linear1.lora_down.weight : \n", + "tensor(2.7539, dtype=torch.float16)\n", + "lora_unet_single_blocks_25_modulation_lin.lora_down.weight : \n", + "tensor(4.5938, dtype=torch.float16)\n", + "lora_unet_single_blocks_26_linear1.lora_down.weight : \n", + "tensor(3.3750, dtype=torch.float16)\n", + "lora_unet_single_blocks_26_modulation_lin.lora_down.weight : \n", + "tensor(4.7344, dtype=torch.float16)\n", + "lora_unet_single_blocks_27_linear1.lora_down.weight : \n", + "tensor(2.3809, dtype=torch.float16)\n", + "lora_unet_single_blocks_27_modulation_lin.lora_down.weight : \n", + "tensor(4.9883, dtype=torch.float16)\n", + "lora_unet_single_blocks_28_linear1.lora_down.weight : \n", + "tensor(3.0859, dtype=torch.float16)\n", + "lora_unet_single_blocks_28_modulation_lin.lora_down.weight : \n", + "tensor(5.7539, dtype=torch.float16)\n", + "lora_unet_single_blocks_29_linear1.lora_down.weight : \n", + "tensor(2.3242, dtype=torch.float16)\n", + "lora_unet_single_blocks_29_modulation_lin.lora_down.weight : \n", + "tensor(3.9160, dtype=torch.float16)\n", + "lora_unet_single_blocks_2_linear1.lora_down.weight : \n", + "tensor(2.1406, dtype=torch.float16)\n", + "lora_unet_single_blocks_2_modulation_lin.lora_down.weight : \n", + "tensor(2.1621, dtype=torch.float16)\n", + "lora_unet_single_blocks_30_linear1.lora_down.weight : \n", + "tensor(2.1211, dtype=torch.float16)\n", + "lora_unet_single_blocks_30_modulation_lin.lora_down.weight : \n", + "tensor(4.8516, dtype=torch.float16)\n", + "lora_unet_single_blocks_31_linear1.lora_down.weight : \n", + "tensor(2.2773, dtype=torch.float16)\n", + "lora_unet_single_blocks_31_modulation_lin.lora_down.weight : \n", + "tensor(4.1367, dtype=torch.float16)\n", + "lora_unet_single_blocks_32_linear1.lora_down.weight : \n", + "tensor(2.5273, dtype=torch.float16)\n", + "lora_unet_single_blocks_32_modulation_lin.lora_down.weight : \n", + "tensor(5.0508, dtype=torch.float16)\n", + "lora_unet_single_blocks_33_linear1.lora_down.weight : \n", + "tensor(2.7051, dtype=torch.float16)\n", + "lora_unet_single_blocks_33_modulation_lin.lora_down.weight : \n", + "tensor(5.2930, dtype=torch.float16)\n", + "lora_unet_single_blocks_34_linear1.lora_down.weight : \n", + "tensor(2.6738, dtype=torch.float16)\n", + "lora_unet_single_blocks_34_modulation_lin.lora_down.weight : \n", + "tensor(4.7852, dtype=torch.float16)\n", + "lora_unet_single_blocks_35_linear1.lora_down.weight : \n", + "tensor(2.5117, dtype=torch.float16)\n", + "lora_unet_single_blocks_35_modulation_lin.lora_down.weight : \n", + "tensor(6.7734, dtype=torch.float16)\n", + "lora_unet_single_blocks_36_linear1.lora_down.weight : \n", + "tensor(1.8418, dtype=torch.float16)\n", + "lora_unet_single_blocks_36_modulation_lin.lora_down.weight : \n", + "tensor(6.5859, dtype=torch.float16)\n", + "lora_unet_single_blocks_37_linear1.lora_down.weight : \n", + "tensor(2.4473, dtype=torch.float16)\n", + "lora_unet_single_blocks_37_modulation_lin.lora_down.weight : \n", + "tensor(2.5742, dtype=torch.float16)\n", + "lora_unet_single_blocks_3_linear1.lora_down.weight : \n", + "tensor(2.5566, dtype=torch.float16)\n", + "lora_unet_single_blocks_3_modulation_lin.lora_down.weight : \n", + "tensor(4.7148, 
dtype=torch.float16)\n", + "lora_unet_single_blocks_4_linear1.lora_down.weight : \n", + "tensor(2.2832, dtype=torch.float16)\n", + "lora_unet_single_blocks_4_modulation_lin.lora_down.weight : \n", + "tensor(2.0566, dtype=torch.float16)\n", + "lora_unet_single_blocks_5_linear1.lora_down.weight : \n", + "tensor(2.2109, dtype=torch.float16)\n", + "lora_unet_single_blocks_5_modulation_lin.lora_down.weight : \n", + "tensor(2.7793, dtype=torch.float16)\n", + "lora_unet_single_blocks_6_linear1.lora_down.weight : \n", + "tensor(3.0176, dtype=torch.float16)\n", + "lora_unet_single_blocks_6_modulation_lin.lora_down.weight : \n", + "tensor(2.9180, dtype=torch.float16)\n", + "lora_unet_single_blocks_7_linear1.lora_down.weight : \n", + "tensor(2.2461, dtype=torch.float16)\n", + "lora_unet_single_blocks_7_modulation_lin.lora_down.weight : \n", + "tensor(2.1074, dtype=torch.float16)\n", + "lora_unet_single_blocks_8_linear1.lora_down.weight : \n", + "tensor(3.0391, dtype=torch.float16)\n", + "lora_unet_single_blocks_8_modulation_lin.lora_down.weight : \n", + "tensor(2.0039, dtype=torch.float16)\n", + "lora_unet_single_blocks_9_linear1.lora_down.weight : \n", + "tensor(3.8789, dtype=torch.float16)\n", + "lora_unet_single_blocks_9_modulation_lin.lora_down.weight : \n", + "tensor(4.0547, dtype=torch.float16)\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "<---- Upload your civiai trained .safetensor file to Google Colab before running the next cell\n", + "\n" + ], + "metadata": { + "id": "oDAUwfFzqzgj" + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WQZ3BZn1p-pw" + }, + "outputs": [], + "source": [ + "civiai_lora = '' # @param {type:'string' ,placeholder:'ex. civitai_trained_e19.safetensors'}\n", + "tensor_art_filename = '' # @param {type:'string' ,placeholder:'ex. e19.safetensors'}\n", + "%cd /content/\n", + "tgt = load_file(f'{civiai_lora}')\n", + "for key in tgt:\n", + " tgt[f'{key}'] = tgt[f'{key}'].to(dtype=torch.float16)\n", + "%cd /content/\n", + "save_file(tgt , f'{tensor_art_filename}')" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Download the new .safetensor file to your device.\n", + "\n", + "Downloading from CoLab Notebook will seemingly do nothing for ~5min. Then the file will download , so be patient.\n", + "\n", + "For faster/more consistent downloads , download your .safetensor file from your Google Drive" + ], + "metadata": { + "id": "blnBW-U4rAS7" + } + } + ] +} \ No newline at end of file