Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -148,7 +148,10 @@
 " %cd /content/\n",
 " _ref = load_file('reference.safetensors' )\n",
 " num_plots = num_plots+1\n",
-"except: _ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
+"except: _ref = torch.zeros(dim).to(dtype = dot_dtype)'\n",
+"#-----#\n",
+"try: ref\n",
+"except: ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
 "\n",
 "image_size = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
 "show_encoding = True # @param {type:\"boolean\"}\n",
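Note: this hunk makes the cell tolerant of a missing reference — both `_ref` (loaded from disk) and `ref` (held in memory) fall back to zero tensors when they are not available yet. A minimal self-contained sketch of that pattern, with `dim` and `dot_dtype` as assumed stand-ins for values the notebook defines elsewhere:

```python
import torch
from safetensors.torch import load_file

dim = 768                  # assumed embedding width; the notebook defines its own dim
dot_dtype = torch.float32  # assumed dtype; the notebook defines its own dot_dtype

# Fall back to a zero reference vector when no saved reference exists yet.
try:
    ref = load_file('reference.safetensors')['weights'].to(dot_dtype)
except Exception:
    ref = torch.zeros(dim, dtype=dot_dtype)
```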
@@ -330,7 +333,7 @@
 "metadata": {
 "id": "IqUsiQw2HU2C"
 },
-"execution_count":
+"execution_count": 4,
 "outputs": []
 },
 {
@@ -403,7 +406,7 @@
 "metadata": {
 "id": "I_-GOwFPKkha"
 },
-"execution_count":
+"execution_count": 5,
 "outputs": []
 },
 {
@@ -467,7 +470,7 @@
 {
 "cell_type": "code",
 "source": [
-"# @title ⚄ Evaluate saved reference similarity to select items\n",
+"# @title ⚄ Evaluate saved reference similarity to select items (optional)\n",
 "EVAL = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
 "\n",
 "# @markdown 📝 Enhance/Penalize Similarity and skip items containing word(s)\n",
@@ -478,8 +481,8 @@
 "_POS = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
 "_NEG = 0 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
 "\n",
-"show_local_reference =
-"show_encoding =
+"show_local_reference = True # @param {type:\"boolean\"}\n",
+"show_encoding = True # @param {type:\"boolean\"}\n",
 "\n",
 "_ref = load_file('reference.safetensors' )\n",
 "ref = _ref['weights'].to(dot_dtype)\n",
@@ -560,7 +563,9 @@
 "if _START_AT.isnumeric(): START_AT = int(_START_AT)\n",
 "\n",
 "output_folder = home_directory + 'results/'\n",
+"output_folder_sims = home_directory + 'results/sims/'\n",
 "my_mkdirs(output_folder)\n",
+"my_mkdirs(output_folder_sims)\n",
 "\n",
 "\n",
 "\n",
@@ -605,6 +610,13 @@
 "_POS2 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
 "_NEG = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
 "# @markdown -----\n",
+"# @markdown Save similarity as a list for later review (this will slow down the code)\n",
+"save_similiarity = True # @param {type:\"boolean\"}\n",
+"# @markdown -----\n",
+"include_similiarity = False # @param {type:\"boolean\"}\n",
+"print_as_list = False # @param {type:\"boolean\"}\n",
+"N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
+"#-----#\n",
 "for _item in POS1.split(','):\n",
 " item = _item.strip()\n",
 " if item == '':continue\n",
@@ -682,6 +694,11 @@
 " #------#\n",
 " sims = torch.matmul(text_encodings*scale, ref.t())\n",
 " sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
+" tmp = {}\n",
+" tmp['weights'] = sorted\n",
+" %cd {output_folder_sims}\n",
+" save_file(tmp, root_filename + '_sims.safetensors')\n",
+" tmp={}\n",
 " #-----#\n",
 " for index in range(LIST_SIZE + START_AT):\n",
 " if index<START_AT: continue\n",
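Note: the five added lines above persist each chunk's sorted similarity scores to `<root_filename>_sims.safetensors` so they can be reviewed later without recomputing the dot products. A minimal sketch of that save-and-reload round trip, using illustrative stand-in names (`out_dir`, `chunk_name`, random scores) rather than the notebook's own variables:

```python
import os
import torch
from safetensors.torch import save_file, load_file

out_dir = '/content/results/sims/'   # stand-in for output_folder_sims
os.makedirs(out_dir, exist_ok=True)

chunk_name = 'chunk_000'             # stand-in for the notebook's root_filename
sims = torch.rand(1000)              # stand-in for one chunk's similarity scores

# Sort descending and store only the sorted weights, as the hunk above does.
sorted_sims, _ = torch.sort(sims, dim=0, descending=True)
save_file({'weights': sorted_sims}, os.path.join(out_dir, chunk_name + '_sims.safetensors'))

# Later: reload the scores for review.
restored = load_file(os.path.join(out_dir, chunk_name + '_sims.safetensors'))['weights']
```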
@@ -696,14 +713,12 @@
 " #-------#\n",
 " continue\n",
 "#---------#\n",
+"total_items = total_items + num_items+1\n",
+"#-------#\n",
 "print(f'\\nProcessed entire list of {total_items} items to find closest match.\\nSaved closest matching indices {START_AT} to {START_AT + LIST_SIZE} as the dict \"similiar_prompts\" with {LIST_SIZE} items.\\n')\n",
 "\n",
 "# Print results\n",
 "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n",
-"include_similiarity = False # @param {type:\"boolean\"}\n",
-"print_as_list = False # @param {type:\"boolean\"}\n",
-"N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
-"\n",
 "if(print_as_list):\n",
 " for index in range(LIST_SIZE):\n",
 " key = indices[index].item()\n",
@@ -724,8 +739,14 @@
 " prompt = (prompt + '}').replace('|}', '} ')\n",
 " #------#\n",
 " print(f'Similiar prompts: \\n\\n\\n{prompt} \\n\\n\\n//----//')\n",
-"
-"
+"#-----#\n",
+"\n",
+"#Clear memory\n",
+"_text_encodings = {}\n",
+"prompts = {}\n",
+"#-----#\n",
+"\n",
+"image\n"
 ],
 "metadata": {
 "id": "kOYZ8Ajn-DD8"
@@ -733,6 +754,90 @@
 "execution_count": null,
 "outputs": []
 },
+{
+"cell_type": "code",
+"source": [
+"# @title ⚄ Evaluate similarities\n",
+"%cd {output_folder_sims}\n",
+"index = 0\n",
+"for filename in os.listdir(output_folder_sims):\n",
+" _sims = load_file(filename)\n",
+" _sims = _sims['weights']\n",
+" for _sim in _sims.tolist():\n",
+" index = index + 1\n",
+" #-------#\n",
+"total_items = index\n",
+"sims = torch.zeros(total_items)\n",
+"index = 0\n",
+"for filename in os.listdir(output_folder_sims):\n",
+" _sims = load_file(filename)\n",
+" _sims = _sims['weights']\n",
+" for sim in _sims.tolist():\n",
+" sims[index] = sim\n",
+" index = index + 1\n",
+" #-------#\n",
+"#---------------#\n",
+"_sorted , indices = torch.sort(sims , dim=0 , descending = True)\n",
+"SCALE = 0.001\n",
+"sorted = torch.round(_sorted/SCALE)\n",
+"ZERO_POINT = sorted[total_items-1].item()\n",
+"sorted = (sorted - torch.ones(total_items)*ZERO_POINT)\n",
+"densities = torch.bincount(sorted.to(dtype = torch.int64))\n",
+"yy = densities.tolist()\n",
+"top = (sorted[0] + ZERO_POINT).to(dtype = torch.int64).item()\n",
+"num_coords = round(top - ZERO_POINT)\n",
+"xx = [round((ZERO_POINT + x)*100*SCALE,2) for x in range(num_coords+1)]\n",
+"index = 0\n",
+"for item in xx:\n",
+" if item>0:break\n",
+" index = index + 1\n",
+"#----#\n",
+"positive_bound = index\n",
+"ss =list(xx)\n",
+"tmp = 0\n",
+"chunk = 1\n",
+"CHUNK_SIZE = 1000\n",
+"index = 0\n",
+"for num in reversed(yy):\n",
+" tmp = tmp + num\n",
+" if(tmp>CHUNK_SIZE):\n",
+" _tmp = math.floor(tmp/CHUNK_SIZE)\n",
+" chunk = chunk + _tmp\n",
+" tmp = tmp - CHUNK_SIZE * _tmp\n",
+" ss[num_coords - index] = chunk\n",
+" index = index + 1\n",
+"#------#\n",
+"fig, ax = plt.subplots()\n",
+"fig.canvas.draw()\n",
+"plt.plot(ss[positive_bound:], xx[positive_bound:])\n",
+"plt.xlabel ('Search depth')\n",
+"plt.ylabel ('Similarity')\n",
+"plt.title ('Similarity to index')\n",
+"plt.grid()\n",
+"indices_depth = [item.get_text() for item in ax.get_xticklabels()]\n",
+"sim_pcnts = [item.get_text() for item in ax.get_yticklabels()]\n",
+"\n",
+"index = 0\n",
+"for index_depth in indices_depth:\n",
+" indices_depth[index] = index_depth + 'K'\n",
+" index = index + 1\n",
+"#-------#\n",
+"\n",
+"index = 0\n",
+"for sim_pcnt in sim_pcnts:\n",
+" sim_pcnts[index] = sim_pcnt + '%'\n",
+" index = index + 1\n",
+"#-------#\n",
+"ax.set_xticklabels(indices_depth)\n",
+"ax.set_yticklabels(sim_pcnts)\n",
+"plt.show()"
+],
+"metadata": {
+"id": "ln6DsZPG99ez"
+},
+"execution_count": null,
+"outputs": []
+},
 {
 "cell_type": "code",
 "source": [
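Note: the cell added by this final hunk reloads every saved `*_sims.safetensors` file, pools the scores, and plots similarity against search depth by bucketing the rounded scores with `torch.bincount` and accumulating counts from the highest similarity downward. A condensed sketch of that idea on synthetic scores (the per-file loading loop and the axis-label reformatting are omitted; the names here are illustrative, not the notebook's):

```python
import torch
import matplotlib.pyplot as plt

# Stand-in for the pooled similarity scores loaded from the *_sims.safetensors files.
sims = torch.randn(50_000) * 0.05 + 0.2

SCALE = 0.001
buckets = torch.round(sims / SCALE).to(torch.int64)   # quantize scores to 0.1% steps
zero_point = buckets.min()
counts = torch.bincount(buckets - zero_point)         # items per similarity level

# Searching from the top, the depth needed to reach a level is the cumulative
# count of all levels at or above it.
depth = torch.flip(torch.cumsum(torch.flip(counts, dims=[0]), dim=0), dims=[0])
levels_pct = (torch.arange(len(counts)) + zero_point) * SCALE * 100  # similarity in %

plt.plot(depth.tolist(), levels_pct.tolist())
plt.xlabel('Search depth')
plt.ylabel('Similarity (%)')
plt.title('Similarity vs. search depth')
plt.grid()
plt.show()
```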