Upload fusion_t2i_CLIP_interrogator.ipynb
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
@@ -3114,8 +3114,8 @@
 "#------#\n",
 "# @markdown βοΈ πΌοΈ encoding <-----?-----> π encoding </div> <br>\n",
 "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
-"
-"prompt_strength = math.pow(10 ,
+"log_strength_1 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"prompt_strength = math.pow(10 ,log_strength_1-1)\n",
 "reference = torch.zeros(768)\n",
 "\n",
 "%cd {home_directory + 'fusion-t2i-generator-data/' + 'reference'}\n",
@@ -3125,10 +3125,10 @@
 "references = '' # Clear up memory\n",
 "# @markdown -----------\n",
 "# @markdown πβ 1st Enhance similarity to prompt(s)\n",
-"
-"
-"pos_strength = math.pow(10 ,
-"for _POS in
+"POS_2 = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
+"log_strength_2 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"pos_strength = math.pow(10 ,log_strength_2-1)\n",
+"for _POS in POS_2.split(','):\n",
 " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
 " text_features_POS = model.get_text_features(**inputs)\n",
 " text_features_POS = text_features_POS/text_features_POS.norm(p=2, dim=-1, keepdim=True)\n",
@@ -3138,8 +3138,8 @@
 "# @markdown -----------\n",
 "# @markdown πβ 2nd Enhance similarity to prompt(s)\n",
 "POS = '' # @param {type:'string' ,placeholder:'item1 , item2 , ...'}\n",
-"
-"pos_strength = math.pow(10 ,
+"log_strength_3 = 1.06 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"pos_strength = math.pow(10 ,log_strength_3-1)\n",
 "for _POS in POS.split(','):\n",
 " inputs = tokenizer(text = _POS.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
 " text_features_POS = model.get_text_features(**inputs)\n",
@@ -3149,8 +3149,8 @@
 "\n",
 "# @markdown π« Penalize similarity to prompt(s)\n",
 "NEG = '' # @param {type:'string' , placeholder:'item1 , item2 , ...'}\n",
-"
-"neg_strength = math.pow(10 ,
+"log_strength_4 = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
+"neg_strength = math.pow(10 ,log_strength_4-1)\n",
 "for _NEG in NEG.split(','):\n",
 " inputs = tokenizer(text = _NEG.strip(), truncation = True , padding=True, return_tensors=\"pt\")\n",
 " text_features_NEG = model.get_text_features(**inputs)\n",
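
The edit above swaps fixed strength constants for log-scale sliders: a slider value s becomes an effective strength of 10^(s - 1), so 1 maps to 1.0, 2 to 10.0, and -5 to 1e-6, which lets one linear slider cover several orders of magnitude. Below is a minimal sketch of that mapping together with the CLIP text-feature step the hunks show (tokenize, get_text_features, L2-normalize). The checkpoint name, the encode_prompts helper, and the final accumulation into reference are assumptions for illustration; only the 10**(s - 1) mapping, the tokenizer/model calls, and the normalization come from the notebook itself.

import math
import torch
from transformers import CLIPModel, CLIPTokenizer

# Assumed checkpoint; the notebook's actual model id is not visible in these hunks.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")

def slider_to_strength(log_strength: float) -> float:
    # The -5..5 slider spans strengths 1e-6 .. 1e4; a value of 1 gives exactly 1.0.
    return math.pow(10, log_strength - 1)

def encode_prompts(csv_prompts: str) -> torch.Tensor:
    # Encode a comma-separated prompt list into L2-normalized CLIP text features,
    # mirroring the per-item loop in the diff.
    feats = []
    for item in csv_prompts.split(','):
        if not item.strip():
            continue
        inputs = tokenizer(text=item.strip(), truncation=True, padding=True, return_tensors="pt")
        f = model.get_text_features(**inputs)
        feats.append(f / f.norm(p=2, dim=-1, keepdim=True))
    return torch.cat(feats) if feats else torch.zeros(1, 768)

# Hypothetical combination into the 768-dim reference vector used for the search;
# the notebook initializes reference = torch.zeros(768), but the exact update rule
# lies outside these hunks.
pos_strength = slider_to_strength(1.06)  # default of log_strength_2 / log_strength_3
neg_strength = slider_to_strength(1.0)   # default of log_strength_4
reference = torch.zeros(768)
reference = reference + pos_strength * encode_prompts("sharp focus , detailed").mean(dim=0)
reference = reference - neg_strength * encode_prompts("blurry , watermark").mean(dim=0)

A logarithmic slider keeps the UI control linear while equal slider steps multiply the effective strength by a constant factor, which is usually easier to tune than an additive scale when useful values range from 0.01 to 100.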