diff --git "a/Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb" "b/Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb" --- "a/Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb" +++ "b/Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb" @@ -56,10 +56,13 @@ " import json , torch , requests , math\n", " import pandas as pd\n", " from PIL import Image\n", + " import cv2\n", " #----#\n", " %cd {home_directory}\n", " !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n", " loaded = True\n", + " %cd {home_directory + 'fusion-t2i-generator-data/'}\n", + " !unzip reference.zip\n", "\n", "from transformers import AutoTokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n", @@ -68,8 +71,6 @@ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n", "logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n", "\n", - "%cd {home_directory + 'fusion-t2i-generator-data/'}\n", - "!unzip reference.zip\n", "#------#\n", "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", "with open(f'reference_prompts.json', 'r') as f:\n", @@ -89,11 +90,10 @@ "#------#\n", "dot_dtype = torch.float32\n", "dim = 768\n", - "reference = torch.zeros(dim).to(dtype = dot_dtype)" + "ref = torch.zeros(dim).to(dtype = dot_dtype)" ], "metadata": { - "id": "TC5lMJrS1HCC", - "cellView": "form" + "id": "TC5lMJrS1HCC" }, "execution_count": null, "outputs": [] @@ -110,6 +110,8 @@ { "cell_type": "code", "source": [ + "try: ref\n", + "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "# @markdown 🖼️+📝 Choose a pre-encoded reference (optional)\n", "index = 657 # @param {type:\"slider\", min:0, max:1666, step:1}\n", "PROMPT_INDEX = index\n", @@ -118,18 +120,24 @@ "if url.find('perchance')>-1:\n", " image = Image.open(requests.get(url, stream=True).raw)\n", "#------#\n", - "try: reference\n", - "except: reference = torch.zeros(dim).to(dtype = dot_dtype)\n", - "if reference == '': reference = torch.zeros(dim).to(dtype = dot_dtype)\n", + "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", + "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n", "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding
\n", "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", - "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n", - "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n", - "reference = torch.add(reference, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", - "reference = torch.add(reference, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", - "references = '' # Clear up memory\n", - "ref = reference.clone().detach()\n", + "method = 'Refresh' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", + "\n", + "if(not method == 'Do nothing'):\n", + " if method == 'Refresh': ref = torch.zeros(dim).to(dtype = dot_dtype)\n", + " if method == 'Subtract from existing ref':\n", + " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", + " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", + " else:\n", + " ref = torch.add(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n", + " ref = torch.add(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n", + " #---------#\n", + " references = '' # Clear up memory\n", + " ref = ref.clone().detach()\n", "#------#\n", "print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n", "image" @@ -143,17 +151,28 @@ { "cell_type": "code", "source": [ + "try: ref\n", + "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "# @markdown 🖼️ Upload your own image for use as reference via URL (optional)\n", "URL = '' # @param {type:'string' ,placeholder:'paste an url here'}\n", - "image = Image.open(requests.get(URL, stream=True).raw)\n", - "#---------#\n", - "# Get image features\n", - "inputs = processor(images=image, return_tensors=\"pt\")\n", - "image_features = model.get_image_features(**inputs)\n", - "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", - "#-----#\n", - "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", - "ref = ref + math.pow(10,log_strength-1)*image_features\n", + "if URL.strip() != '':\n", + " image = Image.open(requests.get(URL, stream=True).raw)\n", + " log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", + " method = 'Do nothing' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", + " #---------#\n", + " if(not method == 'Do nothing'):\n", + " # Get image features\n", + " inputs = processor(images=image, return_tensors=\"pt\")\n", + " image_features = model.get_image_features(**inputs)\n", + " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", + " #-------#\n", + " if method == 'Refresh':\n", + " ref = torch.zeros(dim).to(dtype = dot_dtype)\n", + " if method == 'Subtract from existing ref':\n", + " ref = ref - math.pow(10,log_strength-1)*image_features\n", + " else: ref = ref + math.pow(10,log_strength-1)*image_features\n", + " #-----#\n", + " ref = ref.clone().detach()\n", "image" ], "metadata": { @@ -165,20 +184,32 @@ { "cell_type": "code", "source": [ - "# @markdown 🖼️ Upload your own image in the /content/ folder for use as reference (optional)\n", 
+ "try: ref\n", + "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", + "# @markdown 🖼️ Upload your own image for use as reference via URL (optional)\n", "FILENAME = '' # @param {type:'string' ,placeholder:'IMG_123.png'}\n", - "import cv2\n", - "image = cv2.imread(FILENAME)\n", - "image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", - "\n", - "#---------#\n", - "# Get image features\n", - "inputs = processor(images=image, return_tensors=\"pt\")\n", - "image_features = model.get_image_features(**inputs)\n", - "image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", - "#-----#\n", "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n", - "ref = ref + math.pow(10,log_strength-1)*image_features\n", + "method = 'Do nothing' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n", + "\n", + "if FILENAME.strip() != '':\n", + " %cd /content/\n", + " image = cv2.imread(FILENAME)\n", + " b,g,r = cv2.split(image)\n", + " image = cv2.merge([r,g,b])\n", + " #---------#\n", + " if(not method == 'Do nothing'):\n", + " # Get image features\n", + " inputs = processor(images=image, return_tensors=\"pt\")\n", + " image_features = model.get_image_features(**inputs)\n", + " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n", + " #-------#\n", + " if method == 'Refresh':\n", + " ref = torch.zeros(dim).to(dtype = dot_dtype)\n", + " if method == 'Subtract from existing ref':\n", + " ref = ref - math.pow(10,log_strength-1)*image_features\n", + " else: ref = ref + math.pow(10,log_strength-1)*image_features\n", + " #-----#\n", + " ref = ref.clone().detach()\n", "image" ], "metadata": { @@ -201,17 +232,31 @@ "source": [ "# @title ⚄ Save the reference\n", "try: ref\n", - "except: ref = torch.zeros(dim)\n", + "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n", + "reset_everything = False # @param {type:\"boolean\"}\n", + "if (reset_everything) : ref = torch.zeros(dim).to(dtype = dot_dtype)\n", "_ref = {}\n", "_ref['weights'] = ref.to(dot_dtype)\n", "%cd /content/\n", "save_file(_ref , 'reference.safetensors' )" ], "metadata": { - "id": "lOQuTPfBMK82" + "id": "lOQuTPfBMK82", + "outputId": "dfee8f93-f4bb-4bea-ba73-ca57ce96601c", + "colab": { + "base_uri": "https://localhost:8080/" + } }, - "execution_count": null, - "outputs": [] + "execution_count": 13, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] }, { "cell_type": "code", @@ -358,7 +403,7 @@ "#---------#\n", "print(f'\\nProcessed entire list of {total_items} items to find closest match.\\nSaved closest matching indices {START_AT} to {LIST_SIZE} as the dict \"similiar_prompts\" with {START_AT + LIST_SIZE} items.\\n')\n", "\n", - "# @title ⚄ Print results\n", + "# Print results\n", "sorted , indices = torch.sort(similiar_sims , dim=0 , descending = True)\n", "include_similiarity = False # @param {type:\"boolean\"}\n", "print_as_list = False # @param {type:\"boolean\"}\n", @@ -393,6 +438,40 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "source": [ + "# @title ⚄ Save the results\n", + "\n", + "def mkdir(folder):\n", + " if os.path.exists(folder)==False:\n", + " os.makedirs(folder)\n", + "#-----#\n", + "output_folder = home_directory + 'results'\n", + "mkdir(output_folder)\n", + "#-----#\n", + "try: similiar_prompts\n", + "except:similiar_prompts = {}\n", + "%cd {output_folder}\n", + "print(f'Saving similiar_prompts.json to {output_folder}...')\n", + 
"with open('similiar_prompts.json', 'w') as f:\n", + " json.dump(similiar_prompts, f)\n", + "#-----#\n", + "try: similiar_sims\n", + "except: similiar_sims = torch.zeros(dim).to(dot_dtype)\n", + "#-------#\n", + "_similiar_sims = {}\n", + "_similiar_sims['weights'] = similiar_sims.to(dot_dtype)\n", + "%cd {output_folder}\n", + "print(f'Saving similiar_sims.safetensors to {output_folder}...')\n", + "save_file(_similiar_sims, 'similiar_sims.safetensors')\n" + ], + "metadata": { + "id": "m-N553nXz9Jd" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "source": [ @@ -402,9 +481,28 @@ "include_similiarity = False # @param {type:\"boolean\"}\n", "print_as_list = False # @param {type:\"boolean\"}\n", "N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n", + "FILENAME = '' # @param {type:'string' ,placeholder:'write .json file to load (optional)'}\n", + "_FILENAME = FILENAME.replace('.json' , '')\n", + "if _FILENAME.strip() == '': _FILENAME = 'similiar_prompts'\n", + "#------#\n", + "%cd {output_folder}\n", + "with open(f'{_FILENAME}.json', 'r') as f:\n", + " data = json.load(f)\n", + " _df = pd.DataFrame({'count': data})['count']\n", + " similiar_prompts = {\n", + " key : value for key, value in _df.items()\n", + " }\n", + "#-------#\n", + "_similiar_sims = load_file('similiar_sims.safetensors')\n", + "similiar_sims = _similiar_sims['weights'].to(dot_dtype)\n", + "\n", + "# @title ⚄ Run the CLIP interrogator on the saved reference\n", + "LIST_SIZE = 1000 # @param {type:'number' , placeholder:'set how large the list should be'}\n", + "START_AT = 0 # @param {type:'number' , placeholder:'set how large the list should be'}\n", "\n", "if(print_as_list):\n", - " for index in range(LIST_SIZE):\n", + " for index in range(LIST_SIZE + START_AT):\n", + " if index