diff --git "a/LoRa_Merge_Script.ipynb" "b/LoRa_Merge_Script.ipynb" deleted file mode 100644--- "a/LoRa_Merge_Script.ipynb" +++ /dev/null @@ -1,3699 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Cast civitai trained LoRa in torch.bfloat16 to Tensor Art Compatible torch.float16 dtype\n", - "\n", - "Created by Adcom: https://tensor.art/u/743241123023077878" - ], - "metadata": { - "id": "YDCnQpDdqDe4" - } - }, - { - "cell_type": "code", - "source": [ - "#initialize\n", - "import torch\n", - "from safetensors.torch import load_file, save_file\n", - "from google.colab import drive\n", - "drive.mount('/content/drive')" - ], - "metadata": { - "id": "CBVTifA_ZwdC", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "8ce58389-8263-4016-8ebe-f61708ffef95" - }, - "execution_count": 1, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Mounted at /content/drive\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "\n", - "import torch\n", - "from safetensors.torch import load_file, save_file\n", - "import torch.nn as nn\n", - "from torch import linalg as LA\n", - "import os\n", - "import math\n", - "import random\n", - "import numpy as np\n", - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "\n", - "# For pcnt = 30 , 'filter_and_save' will keep all top 30 % values\n", - "#, and the lowest (negative) 30% values for each layer delta_W in this lora\n", - "# Then save the new filtered lora as a .safetensor file\n", - "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, resolution):\n", - " lora = {}\n", - " count = 0\n", - " for key in _lora:count = count + 1\n", - " NUM_ITEMS = count\n", - " count = 0\n", - " thresh = resolution*0.000001 # 1e-6\n", - " #-------#\n", - " for key in _lora:\n", - " if f'{key}'.find('alpha') > -1:\n", - " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", - " count = count + 1\n", - " print(f'{count} / {NUM_ITEMS}')\n", - " continue\n", - " #------#\n", - " if not f'{key}'.find('lora_down') > -1: continue\n", - " up = f'{key}'.replace('lora_down' , 'lora_up')\n", - " down = f'{key}'\n", - " #-------#\n", - " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", - " #---#\n", - " N = delta_W.numel()\n", - " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", - " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", - " y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n", - " y[indices[values>thresh]] = 1\n", - " y[indices[values<-thresh]] = 1\n", - " y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n", - " delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n", - " #------#\n", - " tmp={}\n", - " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", - " tmp['u'] = tmp['u'][:,: new_rank]\n", - " tmp['s'] = tmp['s'][: new_rank]\n", - " #-------#\n", - " tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n", - " tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n", - " #-------#\n", - " for key in tmp:tmp[f'{key}'] = tmp[f'{key}'].contiguous()\n", - " lora[up] = tmp['u'].to(device = device , 
-        "# 'count_zeros' reports , per layer , what percentage of delta_W elements are\n",
-        "# dropped by the threshold (zeros) , kept as positives , or kept as negatives\n",
-        "def count_zeros(_lora, resolution):\n",
-        "    count = 0\n",
-        "    for key in _lora: count = count + 1\n",
-        "    NUM_ITEMS = count\n",
-        "    count = 0\n",
-        "    #-----#\n",
-        "    thresh = resolution*0.000001 # resolution * 1e-6\n",
-        "\n",
-        "    print(f'at resolution = {resolution}e-6 :')\n",
-        "    for key in _lora:\n",
-        "        if f'{key}'.find('alpha') > -1:\n",
-        "            count = count + 1\n",
-        "            continue\n",
-        "        #------#\n",
-        "        if not f'{key}'.find('lora_down') > -1: continue\n",
-        "        up = f'{key}'.replace('lora_down' , 'lora_up')\n",
-        "        down = f'{key}'\n",
-        "        #-------#\n",
-        "        delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n",
-        "        N = delta_W.numel()\n",
-        "        y = delta_W.flatten().to(device = device , dtype=torch.float32)\n",
-        "        values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n",
-        "        y = torch.ones(y.shape).to(device = device , dtype=torch.float32)\n",
-        "        y[indices[values>thresh]] = 0\n",
-        "        neg_pcnt = round((100*torch.sum(y) / N).item(),2)\n",
-        "        y[indices[values<-thresh]] = 0\n",
-        "        count = count + 2\n",
-        "        pcnt = round((100*torch.sum(y) / N).item(),2)\n",
-        "        neg_pcnt = round(neg_pcnt - pcnt,2) # remove zero % from neg_pcnt\n",
-        "        pos_pcnt = round(100 - pcnt - neg_pcnt,2)\n",
-        "        print(f'at {count} / {NUM_ITEMS} : {pcnt}% zeros , {pos_pcnt}% pos. , {neg_pcnt}% neg.')\n",
-        "    #------#\n",
-        "#-----#\n",
-        "\n",
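-        "# --- editor's sketch (not part of the original script) ---\n",
-        "# Example call , assuming a rank-32 FLUX lora on disk (path is illustrative):\n",
-        "# some_lora = load_file('/kaggle/input/flux-loras/yeero_100_r32_16alpha.safetensors')\n",
-        "# for key in some_lora: some_lora[key] = some_lora[key].to(device = device , dtype = torch.float32)\n",
-        "# count_zeros(some_lora, resolution = 200) # prints % zeros / pos. / neg. per layer\n",
-        "# ---------------------------------------------------------#\n",
-        "\n",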
-        "# 'merge_and_save' merges three loras element-wise by majority vote on the sign\n",
-        "# of each delta_W element : positives are averaged where positive votes >= negative\n",
-        "# votes , negatives where negatives outnumber positives ; |values| < thresh are dropped\n",
-        "def merge_and_save(_lora1 , _lora2 , _lora3, savefile_name, new_rank , new_alpha, resolution):\n",
-        "    lora = {}\n",
-        "    count = 0\n",
-        "    for key in _lora1: count = count + 1\n",
-        "    NUM_ITEMS = count\n",
-        "    count = 0\n",
-        "    thresh = resolution*0.000001 # resolution * 1e-6\n",
-        "\n",
-        "    #-------#\n",
-        "    for key in _lora1:\n",
-        "        if f'{key}'.find('alpha') > -1:\n",
-        "            lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n",
-        "            count = count + 1\n",
-        "            print(f'{count} / {NUM_ITEMS}')\n",
-        "            continue\n",
-        "        #------#\n",
-        "        #if count<462:\n",
-        "        #    count = count + 2\n",
-        "        #    continue\n",
-        "        if not f'{key}'.find('lora_down') > -1: continue\n",
-        "        up = f'{key}'.replace('lora_down' , 'lora_up')\n",
-        "        down = f'{key}'\n",
-        "        #-------#\n",
-        "\n",
-        "        # Setup\n",
-        "        delta_W = torch.matmul(_lora1[up]*0,_lora1[down]*0).to(device = device, dtype=torch.float32)\n",
-        "        tgt_shape = delta_W.shape\n",
-        "        N = delta_W.numel()\n",
-        "        delta_W = torch.zeros(N).to(device = device , dtype=torch.float32)\n",
-        "        #-----#\n",
-        "\n",
-        "        # Positives\n",
-        "        Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
-        "        Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[torch.abs(Y)<thresh] = 0\n",
-        "        Y = Y.t() # shape (N,3) : one row per element , one column per lora\n",
-        "        num_positives = torch.sum(Y>0,dim=1) + 0.1\n",
-        "        elect = torch.sum(Y<0,dim=1) + 0.1\n",
-        "        elect = (num_positives>=elect)\n",
-        "        Y[Y<0] = 0\n",
-        "        Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n",
-        "        delta_W[elect] = torch.round((Y[elect]/num_positives[elect]),decimals=6).to(device = device , dtype=torch.float32)\n",
-        "        #-----#\n",
-        "\n",
-        "        # Negatives\n",
-        "        Y = torch.zeros(3,N).to(device = device , dtype=torch.float32)\n",
-        "        Y[0] = torch.matmul(_lora1[up],_lora1[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[1] = torch.matmul(_lora2[up],_lora2[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[2] = torch.matmul(_lora3[up],_lora3[down]).flatten().to(device = device , dtype=torch.float32)\n",
-        "        Y[torch.abs(Y)<thresh] = 0\n",
-        "        Y = Y.t()\n",
-        "        num_negatives = torch.sum(Y<0,dim=1) + 0.1\n",
-        "        elect = torch.sum(Y>0,dim=1) + 0.1\n",
-        "        elect = (elect<num_negatives)\n",
-        "        Y[Y>0] = 0\n",
-        "        Y = torch.sum(Y, dim=1).to(device = device , dtype=torch.float32)\n",
-        "        delta_W[elect] = torch.round(Y[elect]/num_negatives[elect],decimals=6).to(device = device , dtype=torch.float32)\n",
-        "        #----#\n",
-        "\n",
-        "        # Free up memory prior to SVD\n",
-        "        delta_W = delta_W.unflatten(0,tgt_shape).to(device = device , dtype=torch.float32)\n",
-        "        delta_W = delta_W.clone().detach()\n",
-        "        Y = {}\n",
-        "        num_positives = {}\n",
-        "        num_negatives = {}\n",
-        "        elect = {}\n",
-        "        #-----#\n",
-        "\n",
-        "        # Run SVD (Singular Value Decomposition)\n",
-        "        # to get the new lora_up and lora_down for delta_W\n",
-        "        tmp = {}\n",
-        "        tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n",
-        "        tmp['u'] = tmp['u'][:,: new_rank]\n",
-        "        tmp['s'] = tmp['s'][: new_rank]\n",
-        "        tmp['u'] = torch.matmul(tmp['u'], torch.diag(tmp['s']))\n",
-        "        tmp['Vh'] = tmp['Vh'].t()[: new_rank,:]\n",
-        "        for k in tmp: tmp[k] = tmp[k].contiguous()\n",
-        "        lora[up] = torch.round(tmp['u'],decimals=6).to(device = device , dtype=torch.float32)\n",
-        "        lora[down] = torch.round(tmp['Vh'],decimals=6).to(device = device , dtype=torch.float32)\n",
-        "        #-------#\n",
-        "\n",
-        "        count = count + 2\n",
-        "        print(f'{count} / {NUM_ITEMS}')\n",
-        "    #----#\n",
-        "    #--------#\n",
-        "    print('done!')\n",
-        "    print('casting params to fp16....')\n",
-        "    for key in lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n",
-        "    #-------#\n",
-        "    print('done!')\n",
-        "    print(f'saving {savefile_name}...')\n",
-        "    save_file(lora , f'{savefile_name}')\n",
-        "#------#\n",
-        "\n",
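-        "# --- editor's sketch (not part of the original script) ---\n",
-        "# Toy illustration of the election rule above for 3 loras and N = 4 elements\n",
-        "# (values already thresholded ; names are illustrative):\n",
-        "_demo_Y = torch.tensor([[ 0.3,  0.1, -0.5, 0.0],\n",
-        "                        [-0.2, -0.4, -0.1, 0.0],\n",
-        "                        [ 0.2,  0.0,  0.0, 0.0]]).t() # shape (N=4 , 3)\n",
-        "_demo_pos = torch.sum(_demo_Y>0,dim=1) + 0.1 # positive votes per element\n",
-        "_demo_neg = torch.sum(_demo_Y<0,dim=1) + 0.1 # negative votes per element\n",
-        "print(_demo_pos>=_demo_neg) # tensor([ True,  True, False,  True]) : elements where positives win\n",
-        "# ---------------------------------------------------------#\n",
-        "\n",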
-        "new_rank = 32\n",
-        "new_alpha = math.floor(new_rank/2)\n",
-        "resolution = 200\n",
-        "name = 'yeero_euro_puff'\n",
-        "yeero = load_file('/kaggle/input/flux-loras/yeero_100_r32_16alpha.safetensors')\n",
-        "euro = load_file('/kaggle/input/flux-loras/euro_100_r32_16alpha.safetensors')\n",
-        "puff = load_file('/kaggle/input/flux-loras/puff_200_r32_16alpha.safetensors')\n",
-        "savefile_name = f'{name}_{resolution}_r{new_rank}_a{new_alpha}.safetensors'\n",
-        "\n",
-        "#tgt = load_file(f'/kaggle/input/flux-loras/{name}_{resolution}_r32_16alpha.safetensors')\n",
-        "for key in yeero:\n",
-        "    yeero[f'{key}'] = yeero[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "    euro[f'{key}'] = euro[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "    puff[f'{key}'] = puff[f'{key}'].to(device = device , dtype = torch.float32)\n",
-        "#-----#\n",
-        "print(f'for {name}.safetensors at scale = (alpha/rank) = 0.5')\n",
-        "merge_and_save(yeero , euro , puff, savefile_name, new_rank , new_alpha, resolution)\n",
-        "\n",
-        "\n",
-        "#Yeero + Euro + Puff\n",
-        "#filter_and_save(tgt , f'{name}_{resolution}_r{new_rank}_{new_alpha}alpha.safetensors' , new_rank , new_alpha, resolution)\n"
-      ],
-      "metadata": {
-        "id": "SKYzFxehkfG8"
-      },
-      "execution_count": 32,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "from safetensors.torch import load_file, save_file\n",
-        "_puff = load_file('/content/drive/MyDrive/Saved from Chrome/pfbkFLUX.safetensors')\n",
-        "puff = {}\n",
-        "\n",
-        "#alpha = 64\n",
-        "#rank = 64\n",
-        "#=> so scale = (alpha/rank) = 1\n",
-        "#desired scale = 0.5\n",
-        "#so multiply the matrices by 2 and set alpha to 32\n",
-        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
-        "for key in _puff:\n",
-        "    if f'{key}'.find('alpha')>-1:\n",
-        "        puff[f'{key}'] = torch.tensor(32).to(device=device , dtype = torch.float16)\n",
-        "        #print(puff[f'{key}'])\n",
-        "        continue\n",
-        "    puff[f'{key}'] = 2*_puff[f'{key}'].to(device=device , dtype = torch.float16)\n",
-        "\n",
-        "    #print(puff[f'{key}'].shape)\n",
-        "\n",
-        "save_file(puff, 'puff.safetensors')"
-      ],
-      "metadata": {
-        "id": "U8fCk78GimS8"
-      },
-      "execution_count": 28,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "from safetensors.torch import load_file, save_file\n",
-        "_tongue = load_file('/content/drive/MyDrive/Saved from Chrome/tongue-flux-v2.1.safetensors')\n",
-        "tongue = {}\n",
-        "# Scale = (alpha/rank) = 32/16 = 2\n",
-        "# Desired scale = 0.5 => multiply all matrices by 4 and set alpha to 8\n",
-        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
-        "for key in _tongue:\n",
-        "    if f'{key}'.find('alpha')>-1:\n",
-        "        tongue[f'{key}'] = torch.tensor(8).to(device=device , dtype = torch.float16)\n",
-        "        continue\n",
-        "    #-------#\n",
-        "    tongue[f'{key}'] = 4*_tongue[f'{key}'].to(device=device , dtype = torch.float16)\n",
-        "#-------#\n",
-        "save_file(tongue, 'tongue.safetensors')"
-      ],
-      "metadata": {
-        "id": "lFNa6vgrgdSA"
-      },
-      "execution_count": 23,
-      "outputs": []
-    },
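-    {
-      "cell_type": "code",
-      "source": [
-        "# --- editor's sketch (not part of the original notebook) ---\n",
-        "# Check on the rescaling arithmetic used in the two cells above. The effective\n",
-        "# update of a lora layer is (alpha/rank) * (lora_up @ lora_down) , so multiplying\n",
-        "# BOTH matrices by k scales the update by k**2 ; if the goal were to keep the net\n",
-        "# strength unchanged , the factor would go on only one of the two matrices.\n",
-        "# Dummy shapes below are illustrative.\n",
-        "import torch\n",
-        "up = torch.randn(64, 16)\n",
-        "down = torch.randn(16, 64)\n",
-        "old = (32/16)*torch.matmul(up, down)    # alpha = 32 , rank = 16 -> scale 2\n",
-        "new = (8/16)*torch.matmul(4*up, 4*down) # alpha = 8 , both matrices x4\n",
-        "print(torch.allclose(new, 4*old))       # True : net strength is 4x , not 1x\n"
-      ],
-      "metadata": {
-        "id": "editorSketch01"
-      },
-      "execution_count": null,
-      "outputs": []
-    },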
-    {
-      "cell_type": "code",
-      "source": [
-        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
-        "\n",
-        "_oily = load_file('/content/drive/MyDrive/Saved from Chrome/OiledSkin_FluxDev.safetensors')\n",
-        "\n",
-        "star = load_file('/content/drive/MyDrive/Saved from Chrome/star_100_r32_16alpha.safetensors')\n",
-        "# A = Vh , B = U\n",
-        "# lora_down = A , lora_up = B\n",
-        "\n",
-        "oily = {}\n",
-        "for key in _oily:\n",
-        "    if not f'{key}'.find('_A.')>-1: continue\n",
-        "    A = f'{key}'\n",
-        "    B = f'{key}'.replace('_A.','_B.')\n",
-        "    down = f'{key}'.replace('_A.','_down.')\n",
-        "    up = f'{key}'.replace('_A.','_up.')\n",
-        "    #-----#\n",
-        "    oily[f'{up}'] = _oily[f'{B}'].to(device = device , dtype=torch.float16)\n",
-        "    oily[f'{down}'] = _oily[f'{A}'].to(device = device , dtype=torch.float16)\n",
-        "    #------#\n",
-        "    if not f'{key}'.find('to_k.')>-1: continue\n",
-        "    k = f'{key}'\n",
-        "    q = k.replace('to_k.','to_q.')\n",
-        "    v = k.replace('to_k.','to_v.') # q , k , v are currently unused\n",
-        "\n",
-        "print(\"---------OILY---------\")\n",
-        "for key in oily:\n",
-        "    print(key)\n",
-        "    #if f'{key}'.find('alpha')>-1: print(key)\n",
-        "\n",
-        "print(\"---------STAR---------\")\n",
-        "for key in star:\n",
-        "    break # skip printing the star keys\n",
-        "    print(key)"
-      ],
-      "metadata": {
-        "id": "1oxeJYHRqxQC",
-        "collapsed": true,
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "outputId": "12e3a407-f9d1-403e-949b-31330be59577"
-      },
-      "execution_count": 12,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "name": "stdout",
-          "text": [
-            "---------OILY---------\n",
-            "transformer.single_transformer_blocks.0.attn.to_k.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.attn.to_k.lora_down.weight\n",
-            "transformer.single_transformer_blocks.0.attn.to_q.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.attn.to_q.lora_down.weight\n",
-            "transformer.single_transformer_blocks.0.attn.to_v.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.attn.to_v.lora_down.weight\n",
-            "transformer.single_transformer_blocks.0.norm.linear.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.norm.linear.lora_down.weight\n",
-            "transformer.single_transformer_blocks.0.proj_mlp.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.proj_mlp.lora_down.weight\n",
-            "transformer.single_transformer_blocks.0.proj_out.lora_up.weight\n",
-            "transformer.single_transformer_blocks.0.proj_out.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_k.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_k.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_q.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_q.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_v.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.attn.to_v.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.norm.linear.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.norm.linear.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.proj_mlp.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.proj_mlp.lora_down.weight\n",
-            "transformer.single_transformer_blocks.1.proj_out.lora_up.weight\n",
-            "transformer.single_transformer_blocks.1.proj_out.lora_down.weight\n",
-            "transformer.single_transformer_blocks.10.attn.to_k.lora_up.weight\n",
-            "transformer.single_transformer_blocks.10.attn.to_k.lora_down.weight\n",
-            "transformer.single_transformer_blocks.10.attn.to_q.lora_up.weight\n",
-            "transformer.single_transformer_blocks.10.attn.to_q.lora_down.weight\n",
-
"transformer.single_transformer_blocks.10.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.10.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.10.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.10.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.10.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.10.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.10.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.10.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.11.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.11.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.11.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.11.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.11.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.11.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.11.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.11.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.11.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.11.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.11.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.11.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.12.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.12.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.12.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.12.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.12.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.12.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.12.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.12.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.12.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.12.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.12.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.12.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.13.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.13.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.13.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.13.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.13.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.13.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.13.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.13.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.13.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.13.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.13.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.13.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.14.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.14.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.14.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.14.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.14.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.14.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.14.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.14.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.14.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.14.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.14.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.14.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.15.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.15.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.15.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.15.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.15.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.15.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.15.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.15.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.15.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.15.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.15.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.15.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.16.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.16.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.16.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.16.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.16.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.16.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.16.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.16.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.16.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.16.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.16.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.16.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.17.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.17.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.17.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.17.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.17.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.17.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.17.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.17.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.17.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.17.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.17.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.17.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.18.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.18.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.18.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.18.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.18.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.18.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.18.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.18.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.18.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.18.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.18.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.18.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.19.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.19.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.19.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.19.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.19.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.19.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.19.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.19.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.19.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.19.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.19.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.19.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.2.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.2.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.2.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.2.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.2.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.2.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.2.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.2.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.2.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.2.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.2.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.2.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.20.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.20.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.20.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.20.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.20.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.20.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.20.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.20.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.20.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.20.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.20.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.20.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.21.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.21.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.21.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.21.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.21.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.21.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.21.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.21.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.21.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.21.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.21.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.21.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.22.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.22.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.22.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.22.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.22.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.22.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.22.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.22.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.22.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.22.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.22.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.22.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.23.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.23.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.23.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.23.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.23.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.23.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.23.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.23.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.23.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.23.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.23.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.23.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.24.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.24.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.24.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.24.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.24.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.24.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.24.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.24.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.24.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.24.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.24.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.24.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.25.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.25.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.25.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.25.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.25.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.25.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.25.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.25.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.25.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.25.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.25.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.25.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.26.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.26.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.26.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.26.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.26.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.26.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.26.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.26.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.26.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.26.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.26.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.26.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.27.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.27.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.27.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.27.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.27.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.27.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.27.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.27.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.27.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.27.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.27.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.27.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.28.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.28.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.28.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.28.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.28.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.28.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.28.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.28.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.28.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.28.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.28.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.28.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.29.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.29.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.29.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.29.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.29.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.29.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.29.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.29.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.29.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.29.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.29.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.29.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.3.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.3.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.3.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.3.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.3.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.3.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.3.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.3.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.3.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.3.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.3.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.3.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.30.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.30.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.30.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.30.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.30.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.30.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.30.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.30.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.30.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.30.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.30.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.30.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.31.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.31.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.31.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.31.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.31.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.31.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.31.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.31.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.31.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.31.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.31.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.31.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.32.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.32.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.32.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.32.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.32.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.32.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.32.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.32.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.32.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.32.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.32.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.32.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.33.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.33.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.33.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.33.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.33.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.33.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.33.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.33.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.33.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.33.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.33.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.33.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.34.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.34.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.34.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.34.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.34.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.34.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.34.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.34.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.34.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.34.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.34.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.34.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.35.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.35.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.35.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.35.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.35.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.35.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.35.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.35.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.35.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.35.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.35.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.35.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.36.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.36.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.36.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.36.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.36.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.36.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.36.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.36.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.36.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.36.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.36.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.36.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.37.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.37.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.37.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.37.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.37.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.37.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.37.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.37.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.37.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.37.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.37.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.37.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.4.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.4.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.4.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.4.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.4.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.4.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.4.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.4.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.4.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.4.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.4.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.4.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.5.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.5.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.5.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.5.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.5.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.5.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.5.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.5.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.5.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.5.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.5.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.5.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.6.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.6.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.6.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.6.attn.to_q.lora_down.weight\n", - 
"transformer.single_transformer_blocks.6.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.6.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.6.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.6.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.6.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.6.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.6.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.6.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.7.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.7.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.7.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.7.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.7.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.7.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.7.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.7.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.7.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.7.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.7.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.7.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.8.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.8.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.8.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.8.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.8.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.8.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.8.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.8.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.8.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.8.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.8.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.8.proj_out.lora_down.weight\n", - "transformer.single_transformer_blocks.9.attn.to_k.lora_up.weight\n", - "transformer.single_transformer_blocks.9.attn.to_k.lora_down.weight\n", - "transformer.single_transformer_blocks.9.attn.to_q.lora_up.weight\n", - "transformer.single_transformer_blocks.9.attn.to_q.lora_down.weight\n", - "transformer.single_transformer_blocks.9.attn.to_v.lora_up.weight\n", - "transformer.single_transformer_blocks.9.attn.to_v.lora_down.weight\n", - "transformer.single_transformer_blocks.9.norm.linear.lora_up.weight\n", - "transformer.single_transformer_blocks.9.norm.linear.lora_down.weight\n", - "transformer.single_transformer_blocks.9.proj_mlp.lora_up.weight\n", - "transformer.single_transformer_blocks.9.proj_mlp.lora_down.weight\n", - "transformer.single_transformer_blocks.9.proj_out.lora_up.weight\n", - "transformer.single_transformer_blocks.9.proj_out.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.add_q_proj.lora_down.weight\n", - 
"transformer.transformer_blocks.0.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.0.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.0.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.0.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.0.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.0.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.0.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.0.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.0.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.0.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.0.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.0.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.0.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.0.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.0.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.1.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.1.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.1.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.1.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.1.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.1.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.1.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.1.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.1.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.1.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.1.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.1.norm1.linear.lora_down.weight\n", - 
"transformer.transformer_blocks.1.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.1.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.10.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.10.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.10.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.10.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.10.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.10.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.10.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.10.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.10.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.10.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.10.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.10.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.10.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.10.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.11.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.11.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.11.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.11.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.11.ff.net.2.lora_up.weight\n", - 
"transformer.transformer_blocks.11.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.11.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.11.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.11.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.11.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.11.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.11.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.11.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.11.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.12.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.12.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.12.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.12.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.12.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.12.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.12.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.12.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.12.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.12.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.12.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.12.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.12.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.12.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.to_out.0.lora_down.weight\n", - 
"transformer.transformer_blocks.13.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.13.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.13.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.13.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.13.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.13.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.13.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.13.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.13.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.13.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.13.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.13.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.13.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.13.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.13.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.14.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.14.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.14.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.14.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.14.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.14.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.14.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.14.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.14.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.14.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.14.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.14.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.14.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.14.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.add_v_proj.lora_up.weight\n", - 
"transformer.transformer_blocks.15.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.15.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.15.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.15.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.15.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.15.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.15.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.15.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.15.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.15.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.15.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.15.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.15.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.15.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.15.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.16.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.16.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.16.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.16.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.16.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.16.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.16.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.16.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.16.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.16.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.16.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.16.norm1.linear.lora_down.weight\n", - 
"transformer.transformer_blocks.16.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.16.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.17.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.17.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.17.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.17.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.17.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.17.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.17.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.17.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.17.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.17.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.17.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.17.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.17.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.17.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.18.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.18.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.18.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.18.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.18.ff.net.2.lora_up.weight\n", - 
"transformer.transformer_blocks.18.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.18.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.18.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.18.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.18.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.18.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.18.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.18.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.18.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.2.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.2.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.2.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.2.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.2.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.2.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.2.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.2.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.2.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.2.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.2.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.2.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.2.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.2.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.to_out.0.lora_down.weight\n", - 
"transformer.transformer_blocks.3.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.3.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.3.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.3.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.3.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.3.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.3.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.3.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.3.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.3.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.3.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.3.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.3.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.3.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.3.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.4.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.4.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.4.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.4.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.4.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.4.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.4.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.4.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.4.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.4.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.4.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.4.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.4.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.4.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.add_v_proj.lora_down.weight\n", - 
"transformer.transformer_blocks.5.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.5.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.5.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.5.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.5.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.5.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.5.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.5.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.5.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.5.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.5.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.5.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.5.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.5.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.5.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.6.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.6.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.6.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.6.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.6.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.6.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.6.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.6.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.6.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.6.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.6.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.6.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.6.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.6.norm1_context.linear.lora_down.weight\n", - 
"transformer.transformer_blocks.7.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.7.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.7.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.7.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.7.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.7.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.7.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.7.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.7.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.7.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.7.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.7.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.7.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.7.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.7.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.8.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.8.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.8.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.8.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.8.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.8.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.8.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.8.ff_context.net.0.proj.lora_down.weight\n", - 
"transformer.transformer_blocks.8.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.8.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.8.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.8.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.8.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.8.norm1_context.linear.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.add_k_proj.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.add_k_proj.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.add_q_proj.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.add_q_proj.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.add_v_proj.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.add_v_proj.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.to_add_out.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.to_add_out.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.to_k.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.to_k.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.to_out.0.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.to_out.0.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.to_q.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.to_q.lora_down.weight\n", - "transformer.transformer_blocks.9.attn.to_v.lora_up.weight\n", - "transformer.transformer_blocks.9.attn.to_v.lora_down.weight\n", - "transformer.transformer_blocks.9.ff.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.9.ff.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.9.ff.net.2.lora_up.weight\n", - "transformer.transformer_blocks.9.ff.net.2.lora_down.weight\n", - "transformer.transformer_blocks.9.ff_context.net.0.proj.lora_up.weight\n", - "transformer.transformer_blocks.9.ff_context.net.0.proj.lora_down.weight\n", - "transformer.transformer_blocks.9.ff_context.net.2.lora_up.weight\n", - "transformer.transformer_blocks.9.ff_context.net.2.lora_down.weight\n", - "transformer.transformer_blocks.9.norm1.linear.lora_up.weight\n", - "transformer.transformer_blocks.9.norm1.linear.lora_down.weight\n", - "transformer.transformer_blocks.9.norm1_context.linear.lora_up.weight\n", - "transformer.transformer_blocks.9.norm1_context.linear.lora_down.weight\n", - "---------STAR---------\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "down = 'lora_unet_double_blocks_0_img_attn_qkv.lora_down.weight'\n", - "up = 'lora_unet_double_blocks_0_img_attn_qkv.lora_up.weight'\n", - "tgt = star\n", - "print(\"STAR\")\n", - "print(tgt[f'{up}'].shape)\n", - "#print(torch.matmul(tgt[f'{up}'],tgt[f'{down}']).shape)\n", - "\n", - "down = 'transformer.transformer_blocks.0.attn.to_k.lora_down.weight'\n", - "up = 'transformer.transformer_blocks.0.attn.to_k.lora_up.weight'\n", - "tgt = oily\n", - "print(\"VS. OILY\")\n", - "print(tgt[f'{up}'].shape)\n", - "#print(torch.matmul(tgt[f'{up}'],tgt[f'{down}']).shape)\n" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "GoDfgENYaWD7", - "outputId": "9336ae1a-6244-4e76-f291-82cda4482831" - }, - "execution_count": 17, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "STAR\n", - "torch.Size([9216, 32])\n", - "VS. 
OILY\n", - "torch.Size([3072, 32])\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "for key in oily:\n", - " print(oily[f'{key}'].shape)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "xQhVLouEfmGE", - "outputId": "662176b3-480d-48eb-f5db-97ec71b5e970" - }, - "execution_count": 18, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - 
"torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 
3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - 
"torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([9216, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 15360])\n", - "torch.Size([3072, 
32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - 
"torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 
3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - 
"torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 
32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([12288, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([3072, 32])\n", - "torch.Size([32, 12288])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n", - "torch.Size([18432, 32])\n", - "torch.Size([32, 3072])\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "\n", - "import torch\n", - "from safetensors.torch import load_file, save_file\n", - "import torch.nn as nn\n", - "from torch import linalg as LA\n", - "import os\n", - "import math\n", - "import random\n", - "import numpy as np\n", - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "def _filter(tgt , pcnt, largest):\n", - " num_topk = math.floor(tgt.numel()*(pcnt/100))\n", - " y = tgt.flatten().to(device = device , dtype=torch.float32)\n", - " values,indices = torch.topk( y , num_topk , largest=largest)\n", - " _values,_indices = torch.topk( -y , num_topk , largest=largest)\n", - " y = y*0\n", - " y[indices] = 1\n", - " y[_indices] = 1\n", - " y = y.unflatten(0,tgt.shape).to(device = device , dtype=torch.float32)\n", - " return torch.mul(tgt,y)\n", - "\n", - "#----#\n", - "\n", - "# For pcnt = 30 , 'filter_and_save' will keep all top 30 % values\n", - "#, and the lowest (negative) 30% values for each layer delta_W in this lora\n", - "# Then save the new filtered lora as a .safetensor file\n", - "def filter_and_save(_lora , savefile_name, new_rank , new_alpha, thresh):\n", - " lora = {}\n", - " count = 0\n", - " for key in _lora:count = count + 1\n", - " NUM_ITEMS = count\n", - " count = 0\n", - " thresh = resolution*0.000001 # 1e-6\n", - " #-------#\n", - " for key in _lora:\n", - " if f'{key}'.find('alpha') > -1:\n", - " lora[f'{key}'] = torch.tensor(new_alpha).to(device = device , dtype = torch.float32)\n", - " count = count + 1\n", - " print(f'{count} / {NUM_ITEMS}')\n", - " continue\n", - " #------#\n", - " if not f'{key}'.find('lora_down') > -1: continue\n", - " up = 
f'{key}'.replace('lora_down' , 'lora_up')\n", - " down = f'{key}'\n", - " #-------#\n", - " delta_W = torch.matmul(_lora[up],_lora[down]).to(device = device , dtype=torch.float32)\n", - " #---#\n", - " N = delta_W.numel()\n", - " y = delta_W.flatten().to(device = device , dtype=torch.float32)\n", - " values,indices = torch.sort(y, descending = False) # smallest -> largest elements\n", - " y = torch.zeros(y.shape).to(device = device , dtype=torch.float32)\n", - " y[indices[values>thresh]] = 1\n", - " y[indices[values<-thresh]] = 1\n", - " y = y.unflatten(0,delta_W.shape).to(device = device , dtype=torch.float32)\n", - " delta_W = torch.mul(delta_W,y).to(device = device , dtype=torch.float32)\n", - " #------#\n", - " tmp={}\n", - " tmp['u'], tmp['s'], tmp['Vh'] = torch.svd(delta_W)\n", - " tmp['u'] = tmp['u'][:,: new_rank]\n", - " tmp['s'] = tmp['s'][: new_rank]\n", - " #-------#\n", - " tmp['u'] = torch.round(torch.matmul(tmp['u'], torch.diag(tmp['s'])),decimals=6)\n", - " tmp['Vh'] = torch.round(tmp['Vh'].t()[: new_rank,:],decimals=6)\n", - " #-------#\n", - " for k in tmp: tmp[k] = tmp[k].contiguous() # 'k' avoids shadowing the outer loop's 'key'\n", - " lora[up] = tmp['u'].to(device = device , dtype=torch.float32)\n", - " lora[down] = tmp['Vh'].to(device = device , dtype=torch.float32)\n", - " #-------#\n", - " count = count + 2\n", - " print(f'{count} / {NUM_ITEMS}')\n", - " #-------#\n", - " print(f'done!')\n", - " print(f'casting params to fp16....')\n", - " for key in _lora: lora[f'{key}'] = lora[f'{key}'].to(device = device , dtype=torch.float16)\n", - " #-------#\n", - " print(f'done!')\n", - " print(f'saving {savefile_name}...')\n", - " save_file(lora , f'{savefile_name}')\n", - "#--------#\n", - "\n", - "\n", - "new_rank = 32\n", - "new_alpha = new_rank/2\n", - "resolution = 100\n", - "star = load_file('/kaggle/input/flux-loras/yeero.safetensors')\n", - "for key in star:\n", - " star[f'{key}'] = star[f'{key}'].to(device = device , dtype = torch.float32)\n", - "\n", - "filter_and_save(star , f'yeero_{resolution}_r{new_rank}_{new_alpha}alpha.safetensors' , new_rank , new_alpha, resolution)\n", - "\n", - "#pcnt = 30\n", - "#new_rank = 6\n", - "#filter_and_save(yeero , f'yeero_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n", - "#filter_and_save(euro , f'euro_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n", - "#filter_and_save(star , f'star_topk{pcnt}_r{new_rank}.safetensors' , pcnt , new_rank)\n" - ], - "metadata": { - "id": "f46xbSVkUlDl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi_style.safetensors')" - ], - "metadata": { - "id": "JuGDCX5272Bh" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi_style.safetensors')\n", - "doll = load_file('/content/drive/MyDrive/Saved from Chrome/dolls.safetensors')\n", - "euro = load_file('/content/drive/MyDrive/Saved from Chrome/euro.safetensors')\n", - "scale = load_file('/content/drive/MyDrive/Saved from Chrome/scale.safetensors')\n", - "cgi = load_file('/content/drive/MyDrive/Saved from Chrome/cgi.safetensors')\n", - "guns = load_file('/content/drive/MyDrive/Saved from Chrome/guns.safetensors')\n", - "iris = load_file('/content/drive/MyDrive/Saved from Chrome/iris.safetensors')" - ], - "metadata": { - "id": "FftDdBRG7su6" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [
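- " # cast these three LoRAs to fp16 in place (assumes doll, euro and scale share identical keys)\n", - "for key in 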
doll:\n", - " doll[f'{key}'] = doll[f'{key}'].to(dtype=torch.float16)\n", - " euro[f'{key}'] = euro[f'{key}'].to(dtype=torch.float16)\n", - " scale[f'{key}'] = scale[f'{key}'].to(dtype=torch.float16)" - ], - "metadata": { - "id": "RII9SEqh8KH2" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "#define metric for similarity\n", - "tgt_dim = torch.Size([64, 3072])\n", - "cos0 = nn.CosineSimilarity(dim=1)\n", - "cos = nn.CosineSimilarity(dim=1)\n", - "\n", - "\n", - "def sim(tgt , ref ,key):\n", - " return torch.sum(torch.abs(cos(tgt, ref[f'{key}']))) + torch.sum(torch.abs(cos0(tgt, ref[f'{key}'])))\n", - "#-----#\n", - "\n", - "from torch import linalg as LA\n", - "\n", - "LA.matrix_norm\n", - "def rand_search(A , B , key , iters):\n", - " tgt_norm = (LA.matrix_norm(A[f'{key}']) + LA.matrix_norm(B[f'{key}']))/2\n", - " tgt_avg = (A[f'{key}'] + B[f'{key}'])/2\n", - "\n", - " max_sim = (sim(tgt_avg , A , key) + sim(tgt_avg , B , key))\n", - " cand = tgt_avg\n", - "\n", - " for iter in range(iters):\n", - " rand = torch.ones(tgt_dim)*(-0.5) + torch.rand(tgt_dim)\n", - " rand = rand * (tgt_norm/LA.matrix_norm(rand))\n", - " #rand = (rand + tgt_avg)/2\n", - " #rand = rand * (tgt_norm/LA.matrix_norm(rand))\n", - "\n", - " tmp = sim(rand,A, key) + sim(rand , B, key)\n", - " if (tmp > max_sim):\n", - " max_sim = tmp\n", - " cand = rand\n", - " print('found!')\n", - " break\n", - " #------#\n", - " print('returning')\n", - " return cand , max_sim\n", - "#-----#" - ], - "metadata": { - "id": "hJL6QEclHdHn" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "cand , max_sim = rand_search(cgi , iris , 'lora_unet_double_blocks_0_img_attn_proj.lora_down.weight' , 1000)\n", - "print(sim(cand , iris , key))\n", - "print(sim(cand , cgi , key))" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ckyBSQi5Ll4F", - "outputId": "341f7192-083d-4423-f61f-4f49d5756e79" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "returning\n", - "tensor(91.1875, dtype=torch.float16)\n", - "tensor(90.2500, dtype=torch.float16)\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "(torch.rand(1).to(dtype=torch.float16)*3).item()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "XLwslN61hiIJ", - "outputId": "9e3cbba6-3727-4772-f453-fecf8a408790" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "0.2138671875" - ] - }, - "metadata": {}, - "execution_count": 16 - } - ] - }, - { - "cell_type": "code", - "source": [ - "torch.rand(1).to(dtype=torch.float16)*10" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "AKwh0lZ1f8dJ", - "outputId": "59186526-bd73-4efe-925a-3e7a9c738e53" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "tensor([6.8555], dtype=torch.float16)" - ] - }, - "metadata": {}, - "execution_count": 13 - } - ] - }, - { - "cell_type": "code", - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "#define metric for similarity\n", - "tgt_dim = torch.Size([64, 3072])\n", - "cos0 = nn.CosineSimilarity(dim=0)\n", - "\n", - "\n", - "\n", - "cos = nn.CosineSimilarity(dim=1)\n", - "\n", - "\n", - "def sim(tgt , ref ,key):\n", - " return torch.sum(torch.abs(cos(tgt, 
ref[f'{key}']))) + torch.sum(torch.abs(cos0(tgt, ref[f'{key}'])))\n", - "#-----#" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "SNCvvkb2h3Zb", - "outputId": "725fabd1-3fe2-4ac2-f24c-5f9309d45e4a" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Random-search merge: on every attempt, draw random per-layer blend weights\n", - "# around the fixed priors w_*, add a small noise term, then keep the merge with\n", - "# the best probe-vector similarity score seen so far.\n", - "from safetensors.torch import load_file , save_file\n", - "import torch\n", - "import torch.nn as nn\n", - "from torch import linalg as LA\n", - "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", - "#define metric for similarity\n", - "cos0 = nn.CosineSimilarity(dim=0).to(device)\n", - "final_score = 0\n", - "highest_score = 0\n", - "w_cgi = 1\n", - "w_doll = 2\n", - "w_euro = 2\n", - "w_guns = 1\n", - "w_iris = 2\n", - "w_scale = 1\n", - "\n", - "w_noise = 0.00001 * (w_cgi + w_doll + w_euro + w_guns + w_iris + w_scale)\n", - "fixed_noise = {}\n", - "\n", - "#for key in doll:\n", - "# fixed_noise[f'{key}'] = torch.zeros(doll[f'{key}'].shape).to(device = device , dtype=torch.float16)\n", - "#------#\n", - "#w_offset = 0* (w1+w2+w3)\n", - "#_w_offset = 0\n", - "\n", - "W = (w_cgi + w_doll + w_euro + w_guns + w_iris + w_scale + w_noise)*torch.ones(1).to(device = device,dtype=torch.float16)\n", - "\n", - "SCALE = 0.0001\n", - "one = torch.ones(1).to(dtype=torch.float16).to(device)\n", - "\n", - "for attempt in range(1000):\n", - " print(f'attempt no : {attempt+1} ')\n", - " merge = load_file('/content/drive/MyDrive/Saved from Chrome/dolls.safetensors')\n", - " for key in doll:\n", - " tgt_dim = doll[f'{key}'].shape\n", - " if tgt_dim == torch.Size([]): continue\n", - " r_cgi = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_cgi\n", - " r_doll = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_doll\n", - " r_euro = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_euro\n", - " r_guns = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_guns\n", - " r_iris = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_iris\n", - " r_scale = torch.rand(1).to(device = device,dtype=torch.float16).item()*w_scale\n", - " #------#\n", - " noise = torch.rand(tgt_dim).to(device = device,dtype=torch.float16)\n", - " noise_norm = LA.matrix_norm(noise).to(device = device,dtype=torch.float16).item()\n", - " noise = (w_noise/noise_norm)*noise.to(device = device,dtype=torch.float16)\n", - " #-----#\n", - " merge[f'{key}'] = r_cgi * cgi[f'{key}'] #overwrite\n", - " merge[f'{key}'] = merge[f'{key}'] + r_doll * doll[f'{key}']\n", - " merge[f'{key}'] = merge[f'{key}'] + r_euro * euro[f'{key}']\n", - " merge[f'{key}'] = merge[f'{key}'] + r_guns * guns[f'{key}']\n", - " merge[f'{key}'] = merge[f'{key}'] + r_iris * iris[f'{key}']\n", - " merge[f'{key}'] = merge[f'{key}'] + r_scale * scale[f'{key}']\n", - " merge[f'{key}'] = ((merge[f'{key}'] + noise)/W).to(device = device,dtype=torch.float16)\n", - " #-------#\n", - " score = torch.zeros(1).to(device = device, dtype=torch.float32)\n", - " #----#\n", - " NUM_ITERS = 10\n", - " for iter in range(NUM_ITERS):\n", - " for key in doll:\n", - " tgt_dim = doll[f'{key}'].shape\n", - " if tgt_dim == torch.Size([]): continue\n", - " vec = torch.rand(tgt_dim[0]).to(device = device,dtype=torch.float16)\n",
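- " # responses of each source LoRA and of the merge to the same random probe vec;\n", - " # their |cosine| overlaps with merge_out are accumulated into the score below\n", - " cgi_out = torch.matmul(vec , cgi[f'{key}']).to(device = device,dtype=torch.float16)\n", - " doll_out = torch.matmul(vec 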
, doll[f'{key}']).to(device = device,dtype=torch.float16)\n", - " euro_out = torch.matmul(vec , euro[f'{key}']).to(device = device,dtype=torch.float16)\n", - " guns_out = torch.matmul(vec , guns[f'{key}']).to(device = device,dtype=torch.float16)\n", - " iris_out = torch.matmul(vec , iris[f'{key}']).to(device = device,dtype=torch.float16)\n", - " scale_out = torch.matmul(vec , scale[f'{key}']).to(device = device,dtype=torch.float16)\n", - " merge_out = torch.matmul(vec , merge[f'{key}']).to(device = device,dtype=torch.float16)\n", - " #-------#\n", - " sim_value_cgi = torch.abs(cos0(cgi_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " sim_value_doll = torch.abs(cos0(doll_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " sim_value_euro = torch.abs(cos0(euro_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " sim_value_guns = torch.abs(cos0(guns_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " sim_value_iris = torch.abs(cos0(iris_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " sim_value_scale = torch.abs(cos0(scale_out , merge_out)).to(device = device,dtype=torch.float32)*SCALE\n", - " score = score + SCALE*(sim_value_cgi + 2*sim_value_doll + 2*sim_value_euro + sim_value_guns + 2*sim_value_iris + sim_value_scale)/9 #<--- This score can be anything at all\n", - " #----#\n", - " #-----#\n", - "\n", - " final_score = (1000/(NUM_ITERS * SCALE))*score.to(device = 'cpu' , dtype=torch.float32).item()\n", - " if (final_score>highest_score) :\n", - " highest_score = final_score\n", - " print('new highscore!')\n", - " print(f'score : {final_score} pts')\n", - " #------#\n", - " save_file(merge , 'all_merge_R4.safetensors')\n", - " #------#\n", - "\n", - "print(f'------------')\n", - "print(f'Final score : {highest_score} pts')\n", - "\n", - "\n", - "#all R1 23.190992578747682\n", - "\n", - "#all R2 23.333244826062582\n", - "\n", - "#all R3 23.34471355425194\n", - "\n", - "#all R4 23.402637452818453" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "9L_g5Zp9Du2E", - "outputId": "a3aa2bde-061e-43f5-ca35-96bdc470be80" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "attempt no : 1 \n", - "new highscore!\n", - "score : 23.264414267032407 pts\n", - "attempt no : 2 \n", - "attempt no : 3 \n", - "attempt no : 4 \n", - "new highscore!\n", - "score : 23.29399467271287 pts\n", - "attempt no : 5 \n", - "attempt no : 6 \n", - "attempt no : 7 \n", - "attempt no : 8 \n", - "attempt no : 9 \n", - "attempt no : 10 \n", - "attempt no : 11 \n", - "new highscore!\n", - "score : 23.362628780887462 pts\n", - "attempt no : 12 \n", - "attempt no : 13 \n", - "attempt no : 14 \n", - "attempt no : 15 \n", - "attempt no : 16 \n", - "attempt no : 17 \n", - "attempt no : 18 \n", - "attempt no : 19 \n", - "attempt no : 20 \n", - "attempt no : 21 \n", - "attempt no : 22 \n", - "attempt no : 23 \n", - "new highscore!\n", - "score : 23.37011210329365 pts\n", - "attempt no : 24 \n", - "attempt no : 25 \n", - "attempt no : 26 \n", - "attempt no : 27 \n", - "attempt no : 28 \n", - "attempt no : 29 \n", - "attempt no : 30 \n", - "attempt no : 31 \n", - "attempt no : 32 \n", - "attempt no : 33 \n", - "attempt no : 34 \n", - "new highscore!\n", - "score : 23.402637452818453 pts\n", - "attempt no : 35 \n", - "attempt no : 36 \n", - "attempt no : 37 \n", - "attempt no : 38 \n", - "attempt no : 39 \n", - "attempt no 
: 40 \n", - "attempt no : 41 \n", - "attempt no : 42 \n", - "attempt no : 43 \n", - "attempt no : 44 \n", - "attempt no : 45 \n", - "attempt no : 46 \n", - "attempt no : 47 \n", - "attempt no : 48 \n", - "attempt no : 49 \n", - "attempt no : 50 \n", - "attempt no : 51 \n", - "attempt no : 52 \n", - "attempt no : 53 \n", - "attempt no : 54 \n", - "attempt no : 55 \n", - "attempt no : 56 \n", - "attempt no : 57 \n", - "attempt no : 58 \n", - "attempt no : 59 \n", - "attempt no : 60 \n", - "attempt no : 61 \n", - "attempt no : 62 \n", - "attempt no : 63 \n", - "attempt no : 64 \n", - "attempt no : 65 \n", - "attempt no : 66 \n", - "attempt no : 67 \n", - "attempt no : 68 \n", - "attempt no : 69 \n", - "attempt no : 70 \n", - "attempt no : 71 \n", - "attempt no : 72 \n", - "attempt no : 73 \n", - "attempt no : 74 \n", - "attempt no : 75 \n", - "attempt no : 76 \n", - "attempt no : 77 \n", - "attempt no : 78 \n", - "attempt no : 79 \n", - "attempt no : 80 \n", - "attempt no : 81 \n", - "attempt no : 82 \n", - "attempt no : 83 \n", - "attempt no : 84 \n", - "attempt no : 85 \n", - "attempt no : 86 \n" - ] - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mtgt_dim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdoll\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34mf'{key}'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtgt_dim\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0mvec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrand\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtgt_dim\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0mcgi_out\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmatmul\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvec\u001b[0m \u001b[0;34m,\u001b[0m \u001b[0mcgi\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34mf'{key}'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mdoll_out\u001b[0m \u001b[0;34m=\u001b[0m 
- ] - } - ] - }, - { - "cell_type": "code", - "source": [ - " # NOTE: leftover from an earlier three-LoRA experiment; it expects w1, w2, w3,\n", - " # w_offset and a populated fixed_noise dict, none of which are defined above,\n", - " # so this cell will not run as-is.\n", - " for key in doll:\n", - " if final_score<38.5: break\n", - " _w_offset = w_offset\n", - " W = (w1+w2+w3 + w_noise + _w_offset)*torch.ones(1).to(device = device,dtype=torch.float16)\n", - " tgt_dim = doll[f'{key}'].shape\n", - " if tgt_dim == torch.Size([]): continue\n", - " fixed_noise[f'{key}'] = fixed_noise[f'{key}'] + merge[f'{key}']\n", - " fixed_noise[f'{key}'] = (fixed_noise[f'{key}'] * (w_offset*torch.ones(1).to(device = device,dtype=torch.float16)/LA.matrix_norm(fixed_noise[f'{key}']))).to(device = device,dtype=torch.float16)" - ], - "metadata": { - "id": "jWFHMJN6TqDq" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - " # sanity check: the |cosine| of a vector with itself should be exactly 1\n", - " vec = torch.rand(tgt_dim[0]).to(dtype=torch.float16)\n", - " same = torch.abs(cos0(vec ,vec))" - ], - "metadata": { - "id": "k7Pq-kDbuNnQ" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "same" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ANBPfP7tuOoa", - "outputId": "24300487-f874-4f1b-beb7-0f441ec7df4a" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "tensor(1., dtype=torch.float16)" - ] - }, - "metadata": {}, - "execution_count": 65 - } - ] - }, - { - "cell_type": "code", - "source": [ - "torch.ones(1).to(dtype=torch.float16)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "zN92j8JJuQ6G", - "outputId": "b810f4e6-a8f3-426a-ae52-ffbd44fb3f00" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "tensor([1.], dtype=torch.float16)" - ] - }, - "metadata": {}, - "execution_count": 66 - } - ] - }, - { - "cell_type": "code", - "source": [ - "%cd /content/\n", - "save_file(merge , 'doll_euro_scale_R_merge.safetensors')" - ], - "metadata": { - "id": "7qogsYsAr2QU" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "9wzLwurSpwpL" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "test = torch.rand(tgt_dim)\n", - "vec = torch.rand(tgt_dim[0])" - ], - "metadata": { - "id": "DHdy4DptowYG" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "tgt_dim[0]" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, -
"id": "WeNJ0bquphtx", - "outputId": "442bfb2e-c1ab-4549-a4ea-ca80d3cc9a7d" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "9216" - ] - }, - "metadata": {}, - "execution_count": 46 - } - ] - }, - { - "cell_type": "code", - "source": [ - "(torch.matmul(vec,test)).shape" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "xqZp3Xo8pQuW", - "outputId": "68e5c25e-3391-45e7-9c73-45e0174ddbc1" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "torch.Size([64])" - ] - }, - "metadata": {}, - "execution_count": 48 - } - ] - }, - { - "cell_type": "code", - "source": [ - "tgt_dim = torch.Size([64, 3072])\n", - "cosa = nn.CosineSimilarity(dim=0)\n", - "cos_dim1 = nn.CosineSimilarity(dim=1)\n", - "\n", - "for key in cgi:\n", - " if not cgi[f'{key}'].shape == torch.Size([64, 3072]): continue\n", - " print(f'{key} : ')\n", - " print(torch.sum(torch.abs(cos_dim1(cgi[f'{key}'] , iris[f'{key}']))))" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "VFNw0Nck8V6Q", - "outputId": "e48bab98-18f7-43bb-d1cf-89f3e00f7ccf" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "lora_unet_double_blocks_0_img_attn_proj.lora_down.weight : \n", - "tensor(1.6982, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_img_attn_qkv.lora_down.weight : \n", - "tensor(1.8145, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_img_mlp_0.lora_down.weight : \n", - "tensor(1.6309, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_img_mod_lin.lora_down.weight : \n", - "tensor(2.6211, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_txt_attn_proj.lora_down.weight : \n", - "tensor(2.3203, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.3027, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_txt_mlp_0.lora_down.weight : \n", - "tensor(2.5898, dtype=torch.float16)\n", - "lora_unet_double_blocks_0_txt_mod_lin.lora_down.weight : \n", - "tensor(2.7402, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_img_attn_proj.lora_down.weight : \n", - "tensor(2.0410, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_img_attn_qkv.lora_down.weight : \n", - "tensor(1.3350, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_img_mlp_0.lora_down.weight : \n", - "tensor(2.0020, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_img_mod_lin.lora_down.weight : \n", - "tensor(2.6562, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_txt_attn_proj.lora_down.weight : \n", - "tensor(1.1816, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.1348, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_txt_mlp_0.lora_down.weight : \n", - "tensor(3.0156, dtype=torch.float16)\n", - "lora_unet_double_blocks_10_txt_mod_lin.lora_down.weight : \n", - "tensor(1.4746, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_img_attn_proj.lora_down.weight : \n", - "tensor(1.8359, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_img_attn_qkv.lora_down.weight : \n", - "tensor(1.5312, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_img_mlp_0.lora_down.weight : \n", - "tensor(2.1465, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_img_mod_lin.lora_down.weight : \n", - "tensor(3.9277, dtype=torch.float16)\n", - 
"lora_unet_double_blocks_11_txt_attn_proj.lora_down.weight : \n", - "tensor(1.7246, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.8594, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_txt_mlp_0.lora_down.weight : \n", - "tensor(3.6465, dtype=torch.float16)\n", - "lora_unet_double_blocks_11_txt_mod_lin.lora_down.weight : \n", - "tensor(2.6152, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_img_attn_proj.lora_down.weight : \n", - "tensor(1.7295, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4795, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_img_mlp_0.lora_down.weight : \n", - "tensor(3.4043, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_img_mod_lin.lora_down.weight : \n", - "tensor(2.0137, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_txt_attn_proj.lora_down.weight : \n", - "tensor(1.4375, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.8994, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_txt_mlp_0.lora_down.weight : \n", - "tensor(2.1152, dtype=torch.float16)\n", - "lora_unet_double_blocks_12_txt_mod_lin.lora_down.weight : \n", - "tensor(1.2744, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_img_attn_proj.lora_down.weight : \n", - "tensor(3.0742, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4980, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_img_mlp_0.lora_down.weight : \n", - "tensor(1.9609, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_img_mod_lin.lora_down.weight : \n", - "tensor(2.6133, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_txt_attn_proj.lora_down.weight : \n", - "tensor(1.6904, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.1680, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_txt_mlp_0.lora_down.weight : \n", - "tensor(2.8574, dtype=torch.float16)\n", - "lora_unet_double_blocks_13_txt_mod_lin.lora_down.weight : \n", - "tensor(1.9053, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_img_attn_proj.lora_down.weight : \n", - "tensor(1.8135, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4033, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_img_mlp_0.lora_down.weight : \n", - "tensor(1.5547, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_img_mod_lin.lora_down.weight : \n", - "tensor(2.8906, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_txt_attn_proj.lora_down.weight : \n", - "tensor(1.1328, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.3701, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_txt_mlp_0.lora_down.weight : \n", - "tensor(3.3145, dtype=torch.float16)\n", - "lora_unet_double_blocks_14_txt_mod_lin.lora_down.weight : \n", - "tensor(1.2031, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_img_attn_proj.lora_down.weight : \n", - "tensor(1.5137, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_img_attn_qkv.lora_down.weight : \n", - "tensor(1.3809, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_img_mlp_0.lora_down.weight : \n", - "tensor(1.4834, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_img_mod_lin.lora_down.weight : \n", - "tensor(1.6465, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_txt_attn_proj.lora_down.weight 
: \n", - "tensor(1.7256, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.8672, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_txt_mlp_0.lora_down.weight : \n", - "tensor(2.1953, dtype=torch.float16)\n", - "lora_unet_double_blocks_15_txt_mod_lin.lora_down.weight : \n", - "tensor(0.9858, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_img_attn_proj.lora_down.weight : \n", - "tensor(1.5703, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4648, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_img_mlp_0.lora_down.weight : \n", - "tensor(1.5537, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_img_mod_lin.lora_down.weight : \n", - "tensor(2.6133, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_txt_attn_proj.lora_down.weight : \n", - "tensor(2.2559, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.9365, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_txt_mlp_0.lora_down.weight : \n", - "tensor(2.7891, dtype=torch.float16)\n", - "lora_unet_double_blocks_16_txt_mod_lin.lora_down.weight : \n", - "tensor(1.3174, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_img_attn_proj.lora_down.weight : \n", - "tensor(2.4609, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_img_attn_qkv.lora_down.weight : \n", - "tensor(1.6240, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_img_mlp_0.lora_down.weight : \n", - "tensor(3.1406, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_img_mod_lin.lora_down.weight : \n", - "tensor(2.1055, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_txt_attn_proj.lora_down.weight : \n", - "tensor(1.7480, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.6436, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_txt_mlp_0.lora_down.weight : \n", - "tensor(1.9688, dtype=torch.float16)\n", - "lora_unet_double_blocks_17_txt_mod_lin.lora_down.weight : \n", - "tensor(1.8184, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_img_attn_proj.lora_down.weight : \n", - "tensor(2.3887, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_img_attn_qkv.lora_down.weight : \n", - "tensor(1.6738, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_img_mlp_0.lora_down.weight : \n", - "tensor(3.7500, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_img_mod_lin.lora_down.weight : \n", - "tensor(2.7285, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_txt_attn_proj.lora_down.weight : \n", - "tensor(2.0410, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.0586, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_txt_mlp_0.lora_down.weight : \n", - "tensor(2.0801, dtype=torch.float16)\n", - "lora_unet_double_blocks_18_txt_mod_lin.lora_down.weight : \n", - "tensor(1.5684, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_img_attn_proj.lora_down.weight : \n", - "tensor(4.9844, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_img_attn_qkv.lora_down.weight : \n", - "tensor(1.8613, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_img_mlp_0.lora_down.weight : \n", - "tensor(2.2266, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_img_mod_lin.lora_down.weight : \n", - "tensor(2.8164, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_txt_attn_proj.lora_down.weight : \n", - "tensor(1.7500, dtype=torch.float16)\n", - 
"lora_unet_double_blocks_1_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.3105, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_txt_mlp_0.lora_down.weight : \n", - "tensor(1.9639, dtype=torch.float16)\n", - "lora_unet_double_blocks_1_txt_mod_lin.lora_down.weight : \n", - "tensor(2.6504, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_img_attn_proj.lora_down.weight : \n", - "tensor(4.6367, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_img_attn_qkv.lora_down.weight : \n", - "tensor(1.7988, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_img_mlp_0.lora_down.weight : \n", - "tensor(4.6758, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_img_mod_lin.lora_down.weight : \n", - "tensor(3.1445, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_txt_attn_proj.lora_down.weight : \n", - "tensor(2.2285, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.4990, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_txt_mlp_0.lora_down.weight : \n", - "tensor(2.3984, dtype=torch.float16)\n", - "lora_unet_double_blocks_2_txt_mod_lin.lora_down.weight : \n", - "tensor(1.4443, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_img_attn_proj.lora_down.weight : \n", - "tensor(3.6855, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_img_attn_qkv.lora_down.weight : \n", - "tensor(1.9971, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_img_mlp_0.lora_down.weight : \n", - "tensor(3.3301, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_img_mod_lin.lora_down.weight : \n", - "tensor(2.3379, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_txt_attn_proj.lora_down.weight : \n", - "tensor(2.0117, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.1621, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_txt_mlp_0.lora_down.weight : \n", - "tensor(2.7676, dtype=torch.float16)\n", - "lora_unet_double_blocks_3_txt_mod_lin.lora_down.weight : \n", - "tensor(3.1895, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_img_attn_proj.lora_down.weight : \n", - "tensor(2.3848, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_img_attn_qkv.lora_down.weight : \n", - "tensor(1.7783, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_img_mlp_0.lora_down.weight : \n", - "tensor(2.0234, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_img_mod_lin.lora_down.weight : \n", - "tensor(1.9082, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_txt_attn_proj.lora_down.weight : \n", - "tensor(1.7588, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.9902, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_txt_mlp_0.lora_down.weight : \n", - "tensor(1.5859, dtype=torch.float16)\n", - "lora_unet_double_blocks_4_txt_mod_lin.lora_down.weight : \n", - "tensor(1.5654, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_img_attn_proj.lora_down.weight : \n", - "tensor(2.7402, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_img_attn_qkv.lora_down.weight : \n", - "tensor(1.6221, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_img_mlp_0.lora_down.weight : \n", - "tensor(1.6318, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_img_mod_lin.lora_down.weight : \n", - "tensor(1.7988, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_txt_attn_proj.lora_down.weight : \n", - "tensor(1.1699, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_txt_attn_qkv.lora_down.weight : \n", - "tensor(3.5566, 
dtype=torch.float16)\n", - "lora_unet_double_blocks_5_txt_mlp_0.lora_down.weight : \n", - "tensor(1.5791, dtype=torch.float16)\n", - "lora_unet_double_blocks_5_txt_mod_lin.lora_down.weight : \n", - "tensor(1.5547, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_img_attn_proj.lora_down.weight : \n", - "tensor(1.7988, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4531, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_img_mlp_0.lora_down.weight : \n", - "tensor(2.4141, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_img_mod_lin.lora_down.weight : \n", - "tensor(6.0234, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_txt_attn_proj.lora_down.weight : \n", - "tensor(1.0068, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.0098, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_txt_mlp_0.lora_down.weight : \n", - "tensor(4.0312, dtype=torch.float16)\n", - "lora_unet_double_blocks_6_txt_mod_lin.lora_down.weight : \n", - "tensor(2.6309, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_img_attn_proj.lora_down.weight : \n", - "tensor(1.4814, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4854, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_img_mlp_0.lora_down.weight : \n", - "tensor(1.3877, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_img_mod_lin.lora_down.weight : \n", - "tensor(2.3125, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_txt_attn_proj.lora_down.weight : \n", - "tensor(3.4746, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.0430, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_txt_mlp_0.lora_down.weight : \n", - "tensor(1.8018, dtype=torch.float16)\n", - "lora_unet_double_blocks_7_txt_mod_lin.lora_down.weight : \n", - "tensor(1.1709, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_img_attn_proj.lora_down.weight : \n", - "tensor(1.8857, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_img_attn_qkv.lora_down.weight : \n", - "tensor(1.8848, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_img_mlp_0.lora_down.weight : \n", - "tensor(1.7627, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_img_mod_lin.lora_down.weight : \n", - "tensor(4.2852, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_txt_attn_proj.lora_down.weight : \n", - "tensor(1.3887, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_txt_attn_qkv.lora_down.weight : \n", - "tensor(1.6289, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_txt_mlp_0.lora_down.weight : \n", - "tensor(2.2188, dtype=torch.float16)\n", - "lora_unet_double_blocks_8_txt_mod_lin.lora_down.weight : \n", - "tensor(1.5742, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_img_attn_proj.lora_down.weight : \n", - "tensor(2.3125, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_img_attn_qkv.lora_down.weight : \n", - "tensor(1.4854, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_img_mlp_0.lora_down.weight : \n", - "tensor(1.9492, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_img_mod_lin.lora_down.weight : \n", - "tensor(2.2949, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_txt_attn_proj.lora_down.weight : \n", - "tensor(2.0781, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_txt_attn_qkv.lora_down.weight : \n", - "tensor(2.6172, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_txt_mlp_0.lora_down.weight : \n", - 
"tensor(3.1367, dtype=torch.float16)\n", - "lora_unet_double_blocks_9_txt_mod_lin.lora_down.weight : \n", - "tensor(1.2451, dtype=torch.float16)\n", - "lora_unet_single_blocks_0_linear1.lora_down.weight : \n", - "tensor(2.4375, dtype=torch.float16)\n", - "lora_unet_single_blocks_0_modulation_lin.lora_down.weight : \n", - "tensor(3.5684, dtype=torch.float16)\n", - "lora_unet_single_blocks_10_linear1.lora_down.weight : \n", - "tensor(2.6328, dtype=torch.float16)\n", - "lora_unet_single_blocks_10_modulation_lin.lora_down.weight : \n", - "tensor(2.9961, dtype=torch.float16)\n", - "lora_unet_single_blocks_11_linear1.lora_down.weight : \n", - "tensor(3.1211, dtype=torch.float16)\n", - "lora_unet_single_blocks_11_modulation_lin.lora_down.weight : \n", - "tensor(3.3672, dtype=torch.float16)\n", - "lora_unet_single_blocks_12_linear1.lora_down.weight : \n", - "tensor(3.0293, dtype=torch.float16)\n", - "lora_unet_single_blocks_12_modulation_lin.lora_down.weight : \n", - "tensor(3.6602, dtype=torch.float16)\n", - "lora_unet_single_blocks_13_linear1.lora_down.weight : \n", - "tensor(2.5918, dtype=torch.float16)\n", - "lora_unet_single_blocks_13_modulation_lin.lora_down.weight : \n", - "tensor(4.6367, dtype=torch.float16)\n", - "lora_unet_single_blocks_14_linear1.lora_down.weight : \n", - "tensor(2.0215, dtype=torch.float16)\n", - "lora_unet_single_blocks_14_modulation_lin.lora_down.weight : \n", - "tensor(3.5371, dtype=torch.float16)\n", - "lora_unet_single_blocks_15_linear1.lora_down.weight : \n", - "tensor(2.1719, dtype=torch.float16)\n", - "lora_unet_single_blocks_15_modulation_lin.lora_down.weight : \n", - "tensor(4.2812, dtype=torch.float16)\n", - "lora_unet_single_blocks_16_linear1.lora_down.weight : \n", - "tensor(2.1992, dtype=torch.float16)\n", - "lora_unet_single_blocks_16_modulation_lin.lora_down.weight : \n", - "tensor(4.1094, dtype=torch.float16)\n", - "lora_unet_single_blocks_17_linear1.lora_down.weight : \n", - "tensor(2.0703, dtype=torch.float16)\n", - "lora_unet_single_blocks_17_modulation_lin.lora_down.weight : \n", - "tensor(2.9277, dtype=torch.float16)\n", - "lora_unet_single_blocks_18_linear1.lora_down.weight : \n", - "tensor(2.0371, dtype=torch.float16)\n", - "lora_unet_single_blocks_18_modulation_lin.lora_down.weight : \n", - "tensor(2.6133, dtype=torch.float16)\n", - "lora_unet_single_blocks_19_linear1.lora_down.weight : \n", - "tensor(2.0723, dtype=torch.float16)\n", - "lora_unet_single_blocks_19_modulation_lin.lora_down.weight : \n", - "tensor(3.4980, dtype=torch.float16)\n", - "lora_unet_single_blocks_1_linear1.lora_down.weight : \n", - "tensor(1.7432, dtype=torch.float16)\n", - "lora_unet_single_blocks_1_modulation_lin.lora_down.weight : \n", - "tensor(2.3848, dtype=torch.float16)\n", - "lora_unet_single_blocks_20_linear1.lora_down.weight : \n", - "tensor(2.0137, dtype=torch.float16)\n", - "lora_unet_single_blocks_20_modulation_lin.lora_down.weight : \n", - "tensor(2.8203, dtype=torch.float16)\n", - "lora_unet_single_blocks_21_linear1.lora_down.weight : \n", - "tensor(1.8955, dtype=torch.float16)\n", - "lora_unet_single_blocks_21_modulation_lin.lora_down.weight : \n", - "tensor(2.7305, dtype=torch.float16)\n", - "lora_unet_single_blocks_22_linear1.lora_down.weight : \n", - "tensor(2.7559, dtype=torch.float16)\n", - "lora_unet_single_blocks_22_modulation_lin.lora_down.weight : \n", - "tensor(4.6133, dtype=torch.float16)\n", - "lora_unet_single_blocks_23_linear1.lora_down.weight : \n", - "tensor(2.5508, dtype=torch.float16)\n", - 
"lora_unet_single_blocks_23_modulation_lin.lora_down.weight : \n", - "tensor(4.4180, dtype=torch.float16)\n", - "lora_unet_single_blocks_24_linear1.lora_down.weight : \n", - "tensor(1.9219, dtype=torch.float16)\n", - "lora_unet_single_blocks_24_modulation_lin.lora_down.weight : \n", - "tensor(2.9453, dtype=torch.float16)\n", - "lora_unet_single_blocks_25_linear1.lora_down.weight : \n", - "tensor(2.7539, dtype=torch.float16)\n", - "lora_unet_single_blocks_25_modulation_lin.lora_down.weight : \n", - "tensor(4.5938, dtype=torch.float16)\n", - "lora_unet_single_blocks_26_linear1.lora_down.weight : \n", - "tensor(3.3750, dtype=torch.float16)\n", - "lora_unet_single_blocks_26_modulation_lin.lora_down.weight : \n", - "tensor(4.7344, dtype=torch.float16)\n", - "lora_unet_single_blocks_27_linear1.lora_down.weight : \n", - "tensor(2.3809, dtype=torch.float16)\n", - "lora_unet_single_blocks_27_modulation_lin.lora_down.weight : \n", - "tensor(4.9883, dtype=torch.float16)\n", - "lora_unet_single_blocks_28_linear1.lora_down.weight : \n", - "tensor(3.0859, dtype=torch.float16)\n", - "lora_unet_single_blocks_28_modulation_lin.lora_down.weight : \n", - "tensor(5.7539, dtype=torch.float16)\n", - "lora_unet_single_blocks_29_linear1.lora_down.weight : \n", - "tensor(2.3242, dtype=torch.float16)\n", - "lora_unet_single_blocks_29_modulation_lin.lora_down.weight : \n", - "tensor(3.9160, dtype=torch.float16)\n", - "lora_unet_single_blocks_2_linear1.lora_down.weight : \n", - "tensor(2.1406, dtype=torch.float16)\n", - "lora_unet_single_blocks_2_modulation_lin.lora_down.weight : \n", - "tensor(2.1621, dtype=torch.float16)\n", - "lora_unet_single_blocks_30_linear1.lora_down.weight : \n", - "tensor(2.1211, dtype=torch.float16)\n", - "lora_unet_single_blocks_30_modulation_lin.lora_down.weight : \n", - "tensor(4.8516, dtype=torch.float16)\n", - "lora_unet_single_blocks_31_linear1.lora_down.weight : \n", - "tensor(2.2773, dtype=torch.float16)\n", - "lora_unet_single_blocks_31_modulation_lin.lora_down.weight : \n", - "tensor(4.1367, dtype=torch.float16)\n", - "lora_unet_single_blocks_32_linear1.lora_down.weight : \n", - "tensor(2.5273, dtype=torch.float16)\n", - "lora_unet_single_blocks_32_modulation_lin.lora_down.weight : \n", - "tensor(5.0508, dtype=torch.float16)\n", - "lora_unet_single_blocks_33_linear1.lora_down.weight : \n", - "tensor(2.7051, dtype=torch.float16)\n", - "lora_unet_single_blocks_33_modulation_lin.lora_down.weight : \n", - "tensor(5.2930, dtype=torch.float16)\n", - "lora_unet_single_blocks_34_linear1.lora_down.weight : \n", - "tensor(2.6738, dtype=torch.float16)\n", - "lora_unet_single_blocks_34_modulation_lin.lora_down.weight : \n", - "tensor(4.7852, dtype=torch.float16)\n", - "lora_unet_single_blocks_35_linear1.lora_down.weight : \n", - "tensor(2.5117, dtype=torch.float16)\n", - "lora_unet_single_blocks_35_modulation_lin.lora_down.weight : \n", - "tensor(6.7734, dtype=torch.float16)\n", - "lora_unet_single_blocks_36_linear1.lora_down.weight : \n", - "tensor(1.8418, dtype=torch.float16)\n", - "lora_unet_single_blocks_36_modulation_lin.lora_down.weight : \n", - "tensor(6.5859, dtype=torch.float16)\n", - "lora_unet_single_blocks_37_linear1.lora_down.weight : \n", - "tensor(2.4473, dtype=torch.float16)\n", - "lora_unet_single_blocks_37_modulation_lin.lora_down.weight : \n", - "tensor(2.5742, dtype=torch.float16)\n", - "lora_unet_single_blocks_3_linear1.lora_down.weight : \n", - "tensor(2.5566, dtype=torch.float16)\n", - "lora_unet_single_blocks_3_modulation_lin.lora_down.weight : \n", - "tensor(4.7148, 
dtype=torch.float16)\n", - "lora_unet_single_blocks_4_linear1.lora_down.weight : \n", - "tensor(2.2832, dtype=torch.float16)\n", - "lora_unet_single_blocks_4_modulation_lin.lora_down.weight : \n", - "tensor(2.0566, dtype=torch.float16)\n", - "lora_unet_single_blocks_5_linear1.lora_down.weight : \n", - "tensor(2.2109, dtype=torch.float16)\n", - "lora_unet_single_blocks_5_modulation_lin.lora_down.weight : \n", - "tensor(2.7793, dtype=torch.float16)\n", - "lora_unet_single_blocks_6_linear1.lora_down.weight : \n", - "tensor(3.0176, dtype=torch.float16)\n", - "lora_unet_single_blocks_6_modulation_lin.lora_down.weight : \n", - "tensor(2.9180, dtype=torch.float16)\n", - "lora_unet_single_blocks_7_linear1.lora_down.weight : \n", - "tensor(2.2461, dtype=torch.float16)\n", - "lora_unet_single_blocks_7_modulation_lin.lora_down.weight : \n", - "tensor(2.1074, dtype=torch.float16)\n", - "lora_unet_single_blocks_8_linear1.lora_down.weight : \n", - "tensor(3.0391, dtype=torch.float16)\n", - "lora_unet_single_blocks_8_modulation_lin.lora_down.weight : \n", - "tensor(2.0039, dtype=torch.float16)\n", - "lora_unet_single_blocks_9_linear1.lora_down.weight : \n", - "tensor(3.8789, dtype=torch.float16)\n", - "lora_unet_single_blocks_9_modulation_lin.lora_down.weight : \n", - "tensor(4.0547, dtype=torch.float16)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "<---- Upload your civiai trained .safetensor file to Google Colab before running the next cell\n", - "\n" - ], - "metadata": { - "id": "oDAUwfFzqzgj" - } - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "WQZ3BZn1p-pw" - }, - "outputs": [], - "source": [ - "civiai_lora = '' # @param {type:'string' ,placeholder:'ex. civitai_trained_e19.safetensors'}\n", - "tensor_art_filename = '' # @param {type:'string' ,placeholder:'ex. e19.safetensors'}\n", - "%cd /content/\n", - "tgt = load_file(f'{civiai_lora}')\n", - "for key in tgt:\n", - " tgt[f'{key}'] = tgt[f'{key}'].to(dtype=torch.float16)\n", - "%cd /content/\n", - "save_file(tgt , f'{tensor_art_filename}')" - ] - }, - { - "cell_type": "markdown", - "source": [ - "Download the new .safetensor file to your device.\n", - "\n", - "Downloading from CoLab Notebook will seemingly do nothing for ~5min. Then the file will download , so be patient.\n", - "\n", - "For faster/more consistent downloads , download your .safetensor file from your Google Drive" - ], - "metadata": { - "id": "blnBW-U4rAS7" - } - } - ] -} \ No newline at end of file