codeShare committed
Commit 68bbfd3 · verified · 1 parent: 3d20c7d

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
@@ -175,6 +175,92 @@
  "id": "Xf9zoq-Za3wi"
  }
 },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title ⚄ 📷💭 Use pre-encoded image+prompt pair\n",
+ "loaded_ref = False\n",
+ "try:\n",
+ " ref\n",
+ " loaded_ref = True\n",
+ "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
+ "if loaded_ref : prev_ref = ref.clone().detach()\n",
+ "\n",
+ "try:prompt\n",
+ "except: prompt = ''\n",
+ "\n",
+ "# @markdown 🖼️+📝 Choose a pre-encoded reference (note: some results are NSFW!)\n",
+ "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
+ "PROMPT_INDEX = index\n",
+ "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
+ "url = target_urls[f'{PROMPT_INDEX}']\n",
+ "if url.find('perchance')>-1:\n",
+ " image = Image.open(requests.get(url, stream=True).raw)\n",
+ "#------#\n",
+ "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
+ "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
+ "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
+ "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+ "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+ "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n",
+ "image_size = 0.57 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+ "show_encoding = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "if(not method == 'Do nothing'):\n",
+ " if method == 'Refresh': ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
+ " if method == 'Subtract from existing ref':\n",
+ " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
+ " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
+ " else:\n",
+ " ref = torch.add(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
+ " ref = torch.add(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
+ " #---------#\n",
+ " references = '' # Clear up memory\n",
+ " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
+ " ref = ref.clone().detach()\n",
+ " #------#\n",
+ " # create figure\n",
+ " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
+ " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
+ " rows = 1\n",
+ " columns = 1\n",
+ " if show_encoding: columns = columns+1\n",
+ " if show_encoding and loaded_ref : columns = columns+1\n",
+ " fig.add_subplot(rows, columns, 1)\n",
+ " plt.imshow(image)\n",
+ " plt.axis('off')\n",
+ " plt.title(f\"Reference image at index={index}\" , color='white' , fontsize=round(20*image_size))\n",
+ " #-----#\n",
+ " if show_encoding and loaded_ref:\n",
+ " fig.add_subplot(rows, columns, columns-1)\n",
+ " plt.imshow( visualize(prev_ref))\n",
+ " plt.axis('off')\n",
+ " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n",
+ " print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n",
+ "\n",
+ " if show_encoding:\n",
+ " fig.add_subplot(rows, columns, columns)\n",
+ " plt.imshow( visualize(ref))\n",
+ " plt.axis('off')\n",
+ " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n",
+ " #------#\n"
+ ],
+ "metadata": {
+ "id": "BwrEs5zVB0Sb",
+ "cellView": "form"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Other methods"
+ ],
+ "metadata": {
+ "id": "f9_AcquM7AYZ"
+ }
+ },
 {
  "cell_type": "code",
  "source": [
@@ -289,92 +375,6 @@
  "execution_count": null,
  "outputs": []
 },
- {
- "cell_type": "markdown",
- "source": [
- "# 🖼️ Image encoders (optional)"
- ],
- "metadata": {
- "id": "f9_AcquM7AYZ"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "# @title ⚄ 📷💭 Use pre-encoded image+prompt pair\n",
- "loaded_ref = False\n",
- "try:\n",
- " ref\n",
- " loaded_ref = True\n",
- "except:ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
- "if loaded_ref : prev_ref = ref.clone().detach()\n",
- "\n",
- "try:prompt\n",
- "except: prompt = ''\n",
- "\n",
- "# @markdown 🖼️+📝 Choose a pre-encoded reference (note: some results are NSFW!)\n",
- "index = 596 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
- "PROMPT_INDEX = index\n",
- "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
- "url = target_urls[f'{PROMPT_INDEX}']\n",
- "if url.find('perchance')>-1:\n",
- " image = Image.open(requests.get(url, stream=True).raw)\n",
- "#------#\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
- "references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
- "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding </div> <br>\n",
- "C = 0.3 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
- "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
- "method = 'Add to existing ref' # @param [\"Refresh\" , \"Add to existing ref\" , \"Subtract from existing ref\" , \"Do nothing\"]\n",
- "image_size = 0.57 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
- "show_encoding = True # @param {type:\"boolean\"}\n",
- "\n",
- "if(not method == 'Do nothing'):\n",
- " if method == 'Refresh': ref = torch.zeros(dim).to(dtype = dot_dtype)\n",
- " if method == 'Subtract from existing ref':\n",
- " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
- " ref = torch.sub(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
- " else:\n",
- " ref = torch.add(ref, math.pow(10 ,log_strength-1) * C * references[index][0].dequantize().to(dtype = torch.float32))\n",
- " ref = torch.add(ref, math.pow(10 ,log_strength-1) * (1-C) * references[index][1].dequantize().to(dtype = torch.float32))\n",
- " #---------#\n",
- " references = '' # Clear up memory\n",
- " ref = ref/ref.norm(p=2, dim=-1, keepdim=True)\n",
- " ref = ref.clone().detach()\n",
- " #------#\n",
- " # create figure\n",
- " fig = plt.figure(figsize=(10*image_size, 10*image_size))\n",
- " fig.patch.set_facecolor((56/255,56/255,56/255))\n",
- " rows = 1\n",
- " columns = 1\n",
- " if show_encoding: columns = columns+1\n",
- " if show_encoding and loaded_ref : columns = columns+1\n",
- " fig.add_subplot(rows, columns, 1)\n",
- " plt.imshow(image)\n",
- " plt.axis('off')\n",
- " plt.title(f\"Reference image at index={index}\" , color='white' , fontsize=round(20*image_size))\n",
- " #-----#\n",
- " if show_encoding and loaded_ref:\n",
- " fig.add_subplot(rows, columns, columns-1)\n",
- " plt.imshow( visualize(prev_ref))\n",
- " plt.axis('off')\n",
- " plt.title(\"Encoding (before)\" , color='white' , fontsize=round(20*image_size))\n",
- " print(f'Prompt for this image : \\n\\n \"{prompt} \" \\n\\n')\n",
- "\n",
- " if show_encoding:\n",
- " fig.add_subplot(rows, columns, columns)\n",
- " plt.imshow( visualize(ref))\n",
- " plt.axis('off')\n",
- " plt.title(\"Encoding (now)\" , color='white' , fontsize=round(20*image_size))\n",
- " #------#\n"
- ],
- "metadata": {
- "id": "BwrEs5zVB0Sb",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
 {
  "cell_type": "markdown",
  "source": [
@@ -540,12 +540,10 @@
 {
  "cell_type": "markdown",
  "source": [
- "# CLIP Interrogator\n",
- "\n",
- "**Save the reference prior to running the Interrogator**"
+ "# Search prompts using CLIP"
  ],
  "metadata": {
- "id": "zeu6JcM-mk9z"
+ "id": "UqrYOkhlEQdM"
  }
 },
 {
 
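For orientation, the relocated cell's core operation (minus the Colab form widgets and plotting) is a weighted blend of a pre-encoded image embedding and its prompt embedding into the running reference vector. The sketch below restates that update with stand-in names: img_enc and txt_enc take the place of references[index][0] and references[index][1] after .dequantize(). It is an illustration of the commit's logic, not code from the notebook.

# Minimal sketch (not from the commit): the reference update performed by the relocated cell.
# img_enc / txt_enc are hypothetical stand-ins for the dequantized image/text encodings.
import math
import torch

def update_ref(ref, img_enc, txt_enc, C=0.3, log_strength=1.0, subtract=False):
    w = math.pow(10, log_strength - 1)                # overall strength, log-scaled as in the cell
    delta = w * (C * img_enc + (1 - C) * txt_enc)     # C balances image vs. text encoding
    ref = ref - delta if subtract else ref + delta    # 'Subtract from existing ref' vs. 'Add to existing ref'
    return ref / ref.norm(p=2, dim=-1, keepdim=True)  # re-normalize to unit length

Calling update_ref with subtract=False mirrors the cell's default 'Add to existing ref' path; 'Refresh' corresponds to zeroing ref before the call.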