Upload Joycaption_Alpha_One.ipynb
Joycaption_Alpha_One.ipynb +243 -0
Joycaption_Alpha_One.ipynb
ADDED
@@ -0,0 +1,243 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Dwr7gk5OwuGC"
      },
      "outputs": [],
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Download the JoyCaption alpha-one weights: Llama LoRA adapter, fine-tuned SigLIP, and image adapter\n",
        "!apt -y install -qq aria2\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n",
        "\n",
        "!pip install peft bitsandbytes\n",
        "from huggingface_hub import InferenceClient\n",
        "from torch import nn\n",
        "from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n",
        "import torch\n",
        "import torch.amp.autocast_mode\n",
        "from PIL import Image\n",
        "import os\n",
        "import torchvision.transforms.functional as TVF\n",
        "\n",
        "CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n",
        "MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B\"\n",
        "# Prompt templates keyed by (caption_type, caption_tone, length-is-a-word, length-is-a-number)\n",
        "CAPTION_TYPE_MAP = {\n",
        "    (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n",
        "    (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n",
        "    (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n",
        "    (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n",
        "    (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n",
        "    (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n",
        "    (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n",
        "    (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n",
        "    (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n",
        "    (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n",
        "    (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n",
        "    (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n",
        "}\n",
        "\n",
        "# Projects SigLIP vision features into the Llama token-embedding space and\n",
        "# brackets them with learned boundary embeddings.\n",
        "class ImageAdapter(nn.Module):\n",
        "\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n",
        "\t\tsuper().__init__()\n",
        "\t\tself.deep_extract = deep_extract\n",
        "\t\tif self.deep_extract:\n",
        "\t\t\tinput_features = input_features * 5\n",
        "\t\tself.linear1 = nn.Linear(input_features, output_features)\n",
        "\t\tself.activation = nn.GELU()\n",
        "\t\tself.linear2 = nn.Linear(output_features, output_features)\n",
        "\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n",
        "\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n",
        "\t\tself.other_tokens = nn.Embedding(3, output_features)\n",
        "\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02)  # Matches HF's implementation of llama3\n",
        "\n",
        "\tdef forward(self, vision_outputs: torch.Tensor):\n",
        "\t\tif self.deep_extract:\n",
        "\t\t\t# Concatenate features from several vision layers instead of only the penultimate one\n",
        "\t\t\tx = torch.concat((\n",
        "\t\t\t\tvision_outputs[-2],\n",
        "\t\t\t\tvision_outputs[3],\n",
        "\t\t\t\tvision_outputs[7],\n",
        "\t\t\t\tvision_outputs[13],\n",
        "\t\t\t\tvision_outputs[20],\n",
        "\t\t\t), dim=-1)\n",
        "\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\"  # batch, tokens, features\n",
        "\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n",
        "\t\telse:\n",
        "\t\t\tx = vision_outputs[-2]\n",
        "\t\tx = self.ln1(x)\n",
        "\t\tif self.pos_emb is not None:\n",
        "\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n",
        "\t\t\tx = x + self.pos_emb\n",
        "\t\tx = self.linear1(x)\n",
        "\t\tx = self.activation(x)\n",
        "\t\tx = self.linear2(x)\n",
        "\t\t# Wrap the projected image tokens with the two learned boundary embeddings\n",
        "\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n",
        "\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n",
        "\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n",
        "\t\treturn x\n",
        "\n",
        "\tdef get_eot_embedding(self):\n",
        "\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n",
        "\n",
        "# Load the SigLIP vision tower and swap in the fine-tuned weights\n",
        "clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n",
        "clip_model = AutoModel.from_pretrained(CLIP_PATH)\n",
        "clip_model = clip_model.vision_model\n",
        "checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n",
        "checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n",
        "clip_model.load_state_dict(checkpoint)\n",
        "# del checkpoint\n",
        "clip_model.eval()\n",
        "clip_model.requires_grad_(False)\n",
        "clip_model.to(\"cuda\")\n",
        "\n",
        "# Load Llama 3.1 8B in 8-bit and attach the JoyCaption LoRA adapter\n",
        "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n",
        "assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), f\"Tokenizer is of type {type(tokenizer)}\"\n",
        "text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
        "text_model.load_adapter(\"/content/joy/text_model\")\n",
        "text_model.eval()\n",
        "\n",
        "# Image adapter: SigLIP hidden size -> Llama hidden size\n",
        "image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n",
        "image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n",
        "image_adapter.eval()\n",
        "image_adapter.to(\"cuda\")\n",
        "\n",
        "@torch.no_grad()\n",
        "def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n",
        "    torch.cuda.empty_cache()\n",
        "    length = None if caption_length == \"any\" else caption_length\n",
        "    if isinstance(length, str):\n",
        "        try:\n",
        "            length = int(length)\n",
        "        except ValueError:\n",
        "            pass\n",
        "    # Tags and training prompts only have a formal variant\n",
        "    if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n",
        "        caption_tone = \"formal\"\n",
        "    # Pick the prompt template for this (type, tone, length-mode) combination\n",
        "    prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n",
        "    if prompt_key not in CAPTION_TYPE_MAP:\n",
        "        raise ValueError(f\"Invalid caption type: {prompt_key}\")\n",
        "    prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n",
        "    print(f\"Prompt: {prompt_str}\")\n",
        "\n",
        "    # Preprocess the image for SigLIP: 384x384, scaled to [-1, 1]\n",
        "    image = input_image.resize((384, 384), Image.LANCZOS)\n",
        "    pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n",
        "    pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n",
        "    pixel_values = pixel_values.to('cuda')\n",
        "\n",
        "    # Tokenize the prompt\n",
        "    prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n",
        "\n",
        "    # Embed the image through the vision tower and the image adapter\n",
        "    with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n",
        "        vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n",
        "        image_features = vision_outputs.hidden_states\n",
        "        embedded_images = image_adapter(image_features)\n",
        "        embedded_images = embedded_images.to('cuda')\n",
        "\n",
        "    # Build the input sequence: BOS embedding, image embeddings, prompt embeddings, EOT embedding\n",
        "    prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n",
        "    assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n",
        "    embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n",
        "    eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n",
        "    inputs_embeds = torch.cat([\n",
        "        embedded_bos.expand(embedded_images.shape[0], -1, -1),\n",
        "        embedded_images.to(dtype=embedded_bos.dtype),\n",
        "        prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n",
        "        eot_embed.expand(embedded_images.shape[0], -1, -1),\n",
        "    ], dim=1)\n",
        "    input_ids = torch.cat([\n",
        "        torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n",
        "        torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n",
        "        prompt,\n",
        "        torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n",
        "    ], dim=1).to('cuda')\n",
        "    attention_mask = torch.ones_like(input_ids)\n",
        "\n",
        "    generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None)  # Uses the defaults: temp=0.6, top_p=0.9\n",
        "\n",
        "    # Trim the prompt tokens and any trailing EOS/EOT token, then decode\n",
        "    generate_ids = generate_ids[:, input_ids.shape[1]:]\n",
        "    if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n",
        "        generate_ids = generate_ids[:, :-1]\n",
        "    caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n",
        "    caption = caption.strip().replace('Prompt: Describe the image in 400 words', '')\n",
        "    return caption"
      ],
      "metadata": {
        "id": "0zaheBIsw_dc"
      },
      "execution_count": null,
      "outputs": []
    },
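    {
      "cell_type": "code",
      "source": [
        "# Optional sanity check (a minimal sketch, not part of the original workflow):\n",
        "# caption a single image with stream_chat() before running the batch loop below.\n",
        "# 'test_image_path' is a placeholder -- point it at any image in your Drive.\n",
        "from PIL import Image\n",
        "\n",
        "test_image_path = '/content/drive/MyDrive/test.jpg'  # placeholder path, adjust as needed\n",
        "test_image = Image.open(test_image_path).convert('RGB')\n",
        "print(stream_chat(test_image, \"descriptive\", \"formal\", \"any\"))"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },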
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "from PIL import Image\n",
        "\n",
        "home_directory = '/content/'\n",
        "using_Kaggle = os.environ.get('KAGGLE_URL_BASE', '')\n",
        "if using_Kaggle:\n",
        "    home_directory = '/kaggle/working/'\n",
        "%cd {home_directory}\n",
        "\n",
        "def my_mkdirs(folder):\n",
        "    os.makedirs(folder, exist_ok=True)\n",
        "\n",
        "# Captions and renamed copies of the images are written here\n",
        "tgt_folder = '/content/drive/MyDrive/tmp/'\n",
        "my_mkdirs(tgt_folder)\n",
        "\n",
        "# Source images to caption\n",
        "src_folder = '/content/drive/MyDrive/wild party/'\n",
        "suffixes = ('.png', '.jpeg', '.webp', '.jpg')\n",
        "\n",
        "num = 1\n",
        "for filename in os.listdir(src_folder):\n",
        "    if not filename.lower().endswith(suffixes):\n",
        "        continue\n",
        "    print(filename)\n",
        "    input_image = Image.open(os.path.join(src_folder, filename)).convert('RGB')\n",
        "    caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n",
        "    print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n",
        "    print(caption)\n",
        "    # Save the caption and a PNG copy as a numbered pair: 1.txt/1.png, 2.txt/2.png, ...\n",
        "    with open(os.path.join(tgt_folder, f\"{num}.txt\"), \"w\") as f:\n",
        "        f.write(caption)\n",
        "    input_image.save(os.path.join(tgt_folder, f\"{num}.png\"), \"PNG\")\n",
        "    num = num + 1"
      ],
      "metadata": {
        "id": "J811UZU6xZEo"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from google.colab import runtime\n",
        "runtime.unassign()"
      ],
      "metadata": {
        "id": "kM4TpfdB1amt"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}