codeShare committed on
Commit cdc5b82 · verified · 1 Parent(s): b2c7214

Delete fusion_t2i_CLIP_interrogator.ipynb

Files changed (1)
  1. fusion_t2i_CLIP_interrogator.ipynb +0 -554
fusion_t2i_CLIP_interrogator.ipynb DELETED
@@ -1,554 +0,0 @@
- {
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "provenance": []
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- },
- "language_info": {
- "name": "python"
- }
- },
- "cells": [
17
- {
18
- "cell_type": "markdown",
19
- "source": [
20
- "Try this Free online SD 1.5 generator with the results: https://perchance.org/fusion-ai-image-generator\n",
21
- "\n",
22
- " This Notebook is a Stable-diffusion tool which allows you to find similiar prompts to an existing prompt. It uses the Nearest Neighbor decoder method listed here:https://arxiv.org/pdf/2303.03032"
23
- ],
24
- "metadata": {
25
- "id": "cRV2YWomjMBU"
26
- }
27
- },
28
- {
- "cell_type": "code",
- "source": [
- "import os\n",
- "home_directory = '/content/'\n",
- "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
- "if using_Kaggle: home_directory = '/kaggle/working/'\n",
- "%cd {home_directory}\n",
- "\n",
- "def fix_bad_symbols(txt):\n",
- "  # Escape characters that have special meaning in perchance syntax\n",
- "  result = txt\n",
- "  for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
- "    result = result.replace(symbol, '\\\\' + symbol)\n",
- "  #------#\n",
- "  return result\n",
- "\n",
- "def my_mkdirs(folder):\n",
- "  if not os.path.exists(folder):\n",
- "    os.makedirs(folder)\n",
- "\n",
- "#🔸🔹\n",
- "# Load the data if not already loaded\n",
- "try:\n",
- "  loaded\n",
- "except NameError:\n",
- "  from safetensors.torch import load_file , save_file\n",
- "  import json , torch , requests , math\n",
- "  import pandas as pd\n",
- "  from PIL import Image\n",
- "  #----#\n",
- "  %cd {home_directory}\n",
- "  !git clone https://huggingface.co/datasets/codeShare/fusion-t2i-generator-data\n",
- "  loaded = True\n",
- "  %cd {home_directory + 'fusion-t2i-generator-data/'}\n",
- "  !unzip vocab.zip\n",
- "  !unzip reference.zip\n",
- "#------#\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
- "with open('prompts.json', 'r') as f:\n",
- "  data = json.load(f)\n",
- "  _df = pd.DataFrame({'count': data})['count']\n",
- "  prompts = {key: value for key, value in _df.items()}\n",
- "#-------#\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
- "with open('reference_prompts.json', 'r') as f:\n",
- "  data = json.load(f)\n",
- "  _df = pd.DataFrame({'count': data})['count']\n",
- "  target_prompts = {key: value for key, value in _df.items()}\n",
- "#------#\n",
- "with open('reference_urls.json', 'r') as f:\n",
- "  data = json.load(f)\n",
- "  _df = pd.DataFrame({'count': data})['count']\n",
- "  target_urls = {key: value for key, value in _df.items()}\n",
- "from transformers import AutoTokenizer\n",
- "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
- "from transformers import CLIPProcessor, CLIPModel\n",
- "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
- "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
- "logit_scale = model.logit_scale.exp() # logit_scale = 100.00000762939453\n",
- "\n",
- "f_add = torch.nn.quantized.FloatFunctional()\n",
- "\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
- "vocab_encodings = torch.load('vocab_encodings.pt', weights_only=False)\n",
- "NUM_VOCAB_ITEMS = len(vocab_encodings)\n",
- "\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
- "NUM_REFERENCE_ITEMS = len(torch.load('reference_text_and_image_encodings.pt', weights_only=False))\n"
- ],
- "metadata": {
- "id": "TC5lMJrS1HCC"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
- "# @markdown Choose a pre-encoded reference\n",
- "index = 457 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
- "PROMPT_INDEX = index\n",
- "prompt = target_prompts[f'{PROMPT_INDEX}']\n",
- "url = target_urls[f'{PROMPT_INDEX}']\n",
- "image = None\n",
- "if url.find('perchance')>-1:\n",
- "  image = Image.open(requests.get(url, stream=True).raw)\n",
- "#------#\n",
- "# @markdown ⚖️ 🖼️ encoding <-----?-----> 📝 encoding <br>\n",
- "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
- "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
- "prompt_strength = math.pow(10, log_strength-1)\n",
- "reference = torch.zeros(768)\n",
- "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
- "references = torch.load('reference_text_and_image_encodings.pt', weights_only=False)\n",
- "reference = torch.add(reference, C * references[index][0].dequantize())\n",
- "reference = torch.add(reference, (1-C) * references[index][1].dequantize())\n",
- "references = '' # free the memory\n",
- "# @markdown -----------\n",
- "# @markdown 📝➕ Enhance similarity to prompt(s)\n",
- "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
- "log_strength = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
- "pos_strength = math.pow(10, log_strength-1)\n",
- "for _POS in POS.split(','):\n",
- "  inputs = tokenizer(text = _POS.strip(), truncation = True, padding=True, return_tensors=\"pt\")\n",
- "  text_features_POS = model.get_text_features(**inputs)\n",
- "  text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
- "  reference = torch.add(reference, pos_strength * text_features_POS)\n",
- "# @markdown -----------\n",
- "\n",
- "# @markdown 🚫 Penalize similarity to prompt(s)\n",
- "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
- "log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
- "neg_strength = math.pow(10, log_strength-1)\n",
- "for _NEG in NEG.split(','):\n",
- "  inputs = tokenizer(text = _NEG.strip(), truncation = True, padding=True, return_tensors=\"pt\")\n",
- "  text_features_NEG = model.get_text_features(**inputs)\n",
- "  text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
- "  reference = torch.add(reference, (-1) * neg_strength * text_features_NEG)\n",
- "# @markdown -----------\n",
- "# @markdown ⏩ Skip item(s) containing the word(s)\n",
- "SKIP = 'futa ' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
- "def isBlacklisted(txt):\n",
- "  if txt.strip().isnumeric(): return True\n",
- "  if SKIP.strip() == '': return False\n",
- "  for item in SKIP.split(','):\n",
- "    if item.strip() == '': continue\n",
- "    if txt.find(item.strip()) > -1: return True\n",
- "  #------#\n",
- "  return False\n",
- "# @markdown -----------\n",
- "# @markdown 🔍 How similar should the results be?\n",
- "list_size = 1000 # @param {type:'number'}\n",
- "start_at_index = 1 # @param {type:'number'}\n",
- "# @markdown -----------\n",
- "# @markdown Repeat output N times\n",
- "N = 7 # @param {type:\"slider\", min:0, max:20, step:1}\n",
- "# @markdown -----------\n",
- "# @markdown ⚙️ Run the script?\n",
- "run_script = True # @param {type:\"boolean\"}\n",
- "enable = run_script\n",
- "if enable:\n",
- "  reference = reference/reference.norm(p=2, dim=-1, keepdim=True)\n",
- "  %cd {home_directory + 'fusion-t2i-generator-data/' + 'vocab'}\n",
- "  sims = torch.matmul(vocab_encodings.dequantize(), reference.t())\n",
- "  sorted, indices = torch.sort(sims, dim=0, descending=True)\n",
- "\n",
- "  average = torch.zeros(768)\n",
- "  for key in range(NUM_VOCAB_ITEMS):\n",
- "    if (key >= start_at_index and key < start_at_index + list_size):\n",
- "      average = torch.add(average, vocab_encodings[key].dequantize())\n",
- "    if (key >= start_at_index + list_size): break\n",
- "  average = average * (1/max(1, list_size))\n",
- "  average = average/average.norm(p=2, dim=-1, keepdim=True)\n",
- "  print(average.norm(p=2, dim=-1, keepdim=True))\n",
- "  average = average.clone().detach()\n",
- "  variance = torch.zeros(1)\n",
- "  for key in range(NUM_VOCAB_ITEMS):\n",
- "    if (key >= start_at_index and key < start_at_index + list_size):\n",
- "      # dot product against the normalized average\n",
- "      difference_to_average = 100 * (torch.ones(1) - torch.dot(average[0], vocab_encodings[key].dequantize()[0])/average.norm(p=2, dim=-1, keepdim=True))\n",
- "      variance = torch.add(variance, difference_to_average * difference_to_average)\n",
- "    if (key >= start_at_index + list_size): break\n",
- "  #--------#\n",
- "  variance = variance * (1/max(1, list_size))\n",
- "  variance = variance.clone().detach()\n",
- "  print(f'The standard deviation for the selected range is {math.sqrt(variance.item())} units from average')\n",
- "  #---#\n",
- "  output = '{'\n",
- "  for _index in range(list_size):\n",
- "    _prompt = prompts[f'{indices[min(_index + start_at_index, NUM_VOCAB_ITEMS-1)].item()}']\n",
- "    if isBlacklisted(_prompt): continue\n",
- "    output = output + _prompt + '|'\n",
- "  #---------#\n",
- "  output = (output + '}').replace('|}', '}</w>')\n",
- "  for _ in range(N):\n",
- "    print(output)\n",
- "#-------#\n",
- "image or print('No image found')"
- ],
- "metadata": {
- "id": "NqL_I3ZSrISq"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# Check the average value for this set\n",
- "sims = torch.matmul(vocab_encodings.dequantize(), average.t())\n",
- "sorted, indices = torch.sort(sims, dim=0, descending=True)\n",
- "for index in range(10):\n",
- "  print(prompts[f'{indices[index].item()}'])"
- ],
- "metadata": {
- "id": "XNHz0hfhHRUu"
- },
- "execution_count": 113,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# @title ⚙️📝 Print the results (Advanced)\n",
- "list_size = 1000 # @param {type:'number'}\n",
- "start_at_index = 0 # @param {type:'number'}\n",
- "print_Similarity = True # @param {type:\"boolean\"}\n",
- "print_Prompts = True # @param {type:\"boolean\"}\n",
- "print_Descriptions = True # @param {type:\"boolean\"}\n",
- "compact_Output = True # @param {type:\"boolean\"}\n",
- "newline_Separator = False # @param {type:\"boolean\"}\n",
- "\n",
- "import random\n",
- "# @markdown -----------\n",
- "# @markdown Mix with...\n",
- "list_size2 = 1000 # @param {type:'number'}\n",
- "start_at_index2 = 10000 # @param {type:'number'}\n",
- "rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
- "\n",
- "# @markdown -----------\n",
- "# @markdown Repeat output N times\n",
- "N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
- "\n",
- "# Show the most similar suffix and prefix text-encodings to the text encoding\n",
- "RANGE = list_size\n",
- "separator = '|'\n",
- "if newline_Separator: separator = separator + '\\n'\n",
- "\n",
- "_prompts = ''\n",
- "_sims = ''\n",
- "for _index in range(start_at_index + RANGE):\n",
- "  if _index < start_at_index: continue\n",
- "  index = indices[_index].item()\n",
- "\n",
- "  prompt = prompts[f'{index}']\n",
- "  if rate_percent >= random.randint(0,100): prompt = prompts[f'{random.randint(start_at_index2, start_at_index2 + list_size2)}']\n",
- "\n",
- "  # Remove duplicates\n",
- "  if _prompts.find(prompt + separator) == -1:\n",
- "    _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
- "  #-------#\n",
- "  _prompts = _prompts.replace(prompt + separator, '')\n",
- "  _prompts = _prompts + prompt + separator\n",
- "#------#\n",
- "_prompts = fix_bad_symbols(_prompts)\n",
- "__prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
- "__sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
- "#------#\n",
- "\n",
- "if not print_Prompts: __prompts = ''\n",
- "if not print_Similarity: __sims = ''\n",
- "\n",
- "if not compact_Output:\n",
- "  if print_Descriptions:\n",
- "    print(f'The {start_at_index}-{start_at_index + RANGE} most similar items to prompt : \\n\\n ')\n",
- "    for i in range(N): print(__prompts)\n",
- "    print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
- "    print('')\n",
- "  else:\n",
- "    for i in range(N): print(__prompts)\n",
- "else:\n",
- "  for i in range(N): print(__prompts)\n",
- "#-------#"
- ],
- "metadata": {
- "id": "EdBiAguJO9aX",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "source": [
- "The savefile can be used here: https://perchance.org/fusion-ai-image-generator"
- ],
- "metadata": {
- "id": "JldNmWy1iyvK"
- }
- },
- {
- "cell_type": "code",
- "source": [
- "# @title \t⚄ Create fusion-generator .json savefile from result\n",
- "from safetensors.torch import load_file\n",
- "import json, os, torch\n",
- "import pandas as pd\n",
- "#----#\n",
- "def my_mkdirs(folder):\n",
- "  if not os.path.exists(folder):\n",
- "    os.makedirs(folder)\n",
- "#------#\n",
- "filename = 'blank.json'\n",
- "path = '/content/text-to-image-prompts/fusion/'\n",
- "\n",
- "print(f'reading {filename}....')\n",
- "_index = 0\n",
- "%cd {path}\n",
- "with open(filename, 'r') as f:\n",
- "  data = json.load(f)\n",
- "#------#\n",
- "_df = pd.DataFrame({'count': data})['count']\n",
- "_savefile = {key: value for key, value in _df.items()}\n",
- "#------#\n",
- "savefile_prompt = ''\n",
- "for i in range(N): savefile_prompt = savefile_prompt + ' ' + __prompts\n",
- "_savefile['main'] = savefile_prompt.replace('\\n', ' ').replace('  ', ' ').replace('  ', ' ')\n",
- "#------#\n",
- "save_filename = f'fusion_C05_X7_1000_{PROMPT_INDEX}.json'\n",
- "output_folder = '/content/output/savefiles/'\n",
- "my_mkdirs(output_folder)\n",
- "#-----#\n",
- "%cd {output_folder}\n",
- "print(f'Saving segment {save_filename} to {output_folder}...')\n",
- "with open(save_filename, 'w') as f:\n",
- "  json.dump(_savefile, f)\n"
- ],
- "metadata": {
- "id": "Q7vpNAXQilbf",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# @title \t⚄ Create a savefile-set from the entire range of pre-encoded items\n",
- "\n",
- "# @markdown 📥 Load the data (only required one time)\n",
- "load_the_data = True # @param {type:\"boolean\"}\n",
- "\n",
- "import math\n",
- "from safetensors.torch import load_file\n",
- "import json, os, torch\n",
- "import pandas as pd\n",
- "from PIL import Image\n",
- "import requests\n",
- "\n",
- "# NOTE: getPromptsAndLinks() and text_tensor must be defined before running this cell\n",
- "\n",
- "def my_mkdirs(folder):\n",
- "  if not os.path.exists(folder):\n",
- "    os.makedirs(folder)\n",
- "\n",
- "# @markdown ⚖️ Set the value for C in the reference <br> <br> sim = C * text_enc + (1-C) * image_enc <br><br>\n",
- "\n",
- "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
- "\n",
- "# @markdown 🚫 Penalize similarity to this prompt (optional)\n",
- "if load_the_data:\n",
- "  target_prompts, target_text_encodings, urls, target_image_encodings, NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
- "  from transformers import AutoTokenizer\n",
- "  tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
- "  from transformers import CLIPProcessor, CLIPModel\n",
- "  processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = True)\n",
- "  model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
- "  logit_scale = model.logit_scale.exp() # logit_scale = 100.00000762939453\n",
- "#---------#\n",
- "\n",
- "filename = 'blank.json'\n",
- "path = '/content/text-to-image-prompts/fusion/'\n",
- "print(f'reading {filename}....')\n",
- "_index = 0\n",
- "%cd {path}\n",
- "with open(filename, 'r') as f:\n",
- "  data = json.load(f)\n",
- "#------#\n",
- "_df = pd.DataFrame({'count': data})['count']\n",
- "_blank = {key: value for key, value in _df.items()}\n",
- "#------#\n",
- "\n",
- "root_savefile_name = 'fusion_C05_X7'\n",
- "\n",
- "%cd /content/\n",
- "output_folder = '/content/output/savefiles/'\n",
- "my_mkdirs(output_folder)\n",
- "# One output folder per group of ten chunks\n",
- "for i in range(2, 14):\n",
- "  my_mkdirs(f'/content/output{i}/savefiles/')\n",
- "\n",
- "NEG = '' # @param {type:'string'}\n",
- "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
- "\n",
- "for index in range(1667):\n",
- "\n",
- "  PROMPT_INDEX = index\n",
- "  prompt = target_prompts[f'{index}']\n",
- "  url = urls[f'{index}']\n",
- "  if url.find('perchance')>-1:\n",
- "    image = Image.open(requests.get(url, stream=True).raw)\n",
- "  else: continue # no image for this ID\n",
- "\n",
- "  print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
- "  text_features_A = target_text_encodings[f'{index}']\n",
- "  image_features_A = target_image_encodings[f'{index}']\n",
- "  # text-similarity\n",
- "  sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
- "\n",
- "  neg_sims = 0 * sims\n",
- "  if NEG != '':\n",
- "    # Get text features for user input\n",
- "    inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
- "    text_features_NEG = model.get_text_features(**inputs)\n",
- "    text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
- "    # text-similarity\n",
- "    neg_sims = strength * torch.matmul(text_tensor, text_features_NEG.t())\n",
- "  #------#\n",
- "\n",
- "  # plus image-similarity\n",
- "  sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
- "\n",
- "  # minus NEG-similarity\n",
- "  sims = sims - neg_sims\n",
- "\n",
- "  # Sort the items\n",
- "  sorted, indices = torch.sort(sims, dim=0, descending=True)\n",
- "\n",
- "  RANGE = 1000\n",
- "  NUM_CHUNKS = 130 # chunks 11-130 are written to output2/ ... output13/\n",
- "  separator = '|'\n",
- "  _savefiles = {}\n",
- "  #-----#\n",
- "  for chunk in range(NUM_CHUNKS):\n",
- "    if chunk <= 10: continue\n",
- "    start_at_index = chunk * RANGE\n",
- "    _prompts = ''\n",
- "    for _index in range(start_at_index + RANGE):\n",
- "      if _index < start_at_index: continue\n",
- "      index = indices[_index].item()\n",
- "      prompt = prompts[f'{index}']\n",
- "      _prompts = _prompts.replace(prompt + separator, '')\n",
- "      _prompts = _prompts + prompt + separator\n",
- "    #------#\n",
- "    _prompts = fix_bad_symbols(_prompts)\n",
- "    _prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
- "    _savefiles[f'{chunk}'] = _prompts\n",
- "    #---------#\n",
- "    save_filename = f'{root_savefile_name}_{start_at_index + RANGE}_{PROMPT_INDEX}.json'\n",
- "\n",
- "    # chunks 11-20 -> output2 , 21-30 -> output3 , ... , 121-130 -> output13\n",
- "    save_folder = f'/content/output{(chunk + 9) // 10}/savefiles/'\n",
- "    %cd {save_folder}\n",
- "    #------#\n",
- "    print(f'Saving savefile {save_filename} to {save_folder}...')\n",
- "    with open(save_filename, 'w') as f:\n",
- "      json.dump(_savefiles, f)\n",
- "  #---------#\n",
- "#-----------#"
- ],
- "metadata": {
- "id": "x1uAVXZEoL0T",
- "cellView": "form"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "# @title Download the text_encodings as .zip\n",
- "# Determine if this notebook is running on Colab or Kaggle\n",
- "# Use https://www.kaggle.com/ if the Google Colab GPU is busy\n",
- "import os\n",
- "home_directory = '/content/'\n",
- "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
- "if using_Kaggle: home_directory = '/kaggle/working/'\n",
- "%cd {home_directory}\n",
- "#-------#\n",
- "#os.remove(f'{home_directory}results.zip')\n",
- "root_output_folder = home_directory + 'output/'\n",
- "zip_dest = f'{home_directory}results.zip' #drive/MyDrive\n",
- "!zip -r {zip_dest} {root_output_folder}"
- ],
- "metadata": {
- "id": "zivBNrw9uSVD"
- },
- "execution_count": null,
- "outputs": []
- }
- ]
- }
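
For reference, the core technique the deleted notebook implemented: blend a CLIP text encoding and a CLIP image encoding into a single reference vector (sim = C * text_enc + (1-C) * image_enc, per the cell above), then rank a pre-encoded prompt vocabulary by cosine similarity against that reference. Below is a minimal sketch, assuming `vocab_encodings` is an (N, 768) tensor of L2-normalized CLIP text encodings and `prompts` maps "0".."N-1" to prompt strings, as in the deleted source; `build_reference` and `most_similar` are illustrative helper names, not part of the original notebook.

    import torch
    from transformers import CLIPModel, CLIPProcessor

    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

    def build_reference(text, image, C=0.5):
        # Blend the two encodings: ref = C * text_enc + (1 - C) * image_enc
        tok = processor(text=text, return_tensors="pt", padding=True, truncation=True)
        text_enc = model.get_text_features(**tok)
        img = processor(images=image, return_tensors="pt")
        image_enc = model.get_image_features(**img)
        text_enc = text_enc / text_enc.norm(p=2, dim=-1, keepdim=True)
        image_enc = image_enc / image_enc.norm(p=2, dim=-1, keepdim=True)
        ref = C * text_enc + (1 - C) * image_enc
        return ref / ref.norm(p=2, dim=-1, keepdim=True)  # shape (1, 768)

    def most_similar(reference, vocab_encodings, prompts, k=10):
        # Both sides are L2-normalized, so a matmul gives cosine similarities
        sims = torch.matmul(vocab_encodings, reference.t()).squeeze(-1)
        _, indices = torch.sort(sims, dim=0, descending=True)
        return [prompts[f"{i.item()}"] for i in indices[:k]]

For example, `most_similar(build_reference('a watercolor fox', image, C=0.5), vocab_encodings, prompts, k=10)` would return the ten vocabulary prompts closest to the blended reference, which is what the notebook's "Use a pre-encoded prompt + image pair" cell printed in perchance `{a|b|c}` form.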