Upload sd_token_similarity_calculator.ipynb
sd_token_similarity_calculator.ipynb
CHANGED
@@ -374,10 +374,10 @@
 ],
 "metadata": {
 "id": "JkzncP8SgKtS",
-"outputId": "37351bed-c5e2-4554-c5e0-a9dc84da700b",
 "colab": {
 "base_uri": "https://localhost:8080/"
-}
+},
+"outputId": "37351bed-c5e2-4554-c5e0-a9dc84da700b"
 },
 "execution_count": 6,
 "outputs": [
@@ -1060,15 +1060,16 @@
 "my_mkdirs('/content/text_encodings/')\n",
 "filename = ''\n",
 "\n",
-"NUM_FILES =
+"NUM_FILES = 7\n",
 "\n",
 "for file_index in range(NUM_FILES + 1):\n",
 " if file_index <1: continue\n",
-"
+" #if file_index >4: break\n",
+" filename = f'🔹 fusion-t2i-sd15-clip-tokens-exotic-suffix-{file_index} Tokens'\n",
 " #🦜 fusion-t2i-prompt-features-1.json\n",
 "\n",
 " # Read suffix.json\n",
-" %cd /content/text-to-image-prompts/
+" %cd /content/text-to-image-prompts/tokens/suffix/exotic/text\n",
 " with open(filename + '.json', 'r') as f:\n",
 " data = json.load(f)\n",
 " _df = pd.DataFrame({'count': data})['count']\n",
@@ -1106,7 +1107,7 @@
 "source": [
 "# @title Download the created text_encodings as .zip file\n",
 "%cd /content/\n",
-"!zip -r /content/
+"!zip -r /content/tokens.zip /content/text-to-image-prompts/tokens"
 ],
 "metadata": {
 "id": "gX-sHZPWj4Lt"
@@ -1114,6 +1115,19 @@
 "execution_count": null,
 "outputs": []
 },
+{
+"cell_type": "code",
+"source": [
+"# @title Download the created text_encodings as .zip file\n",
+"%cd /content/\n",
+"!zip -r /content/text-encodings.zip /content/text_encodings"
+],
+"metadata": {
+"id": "b3DUPYfskAIc"
+},
+"execution_count": null,
+"outputs": []
+},
 {
 "cell_type": "markdown",
 "source": [