codeShare committed
Commit 7ff3e68 · verified · 1 Parent(s): a84cd10

Upload joy_caption_alpha_one_jupyter.ipynb

Files changed (1)
  1. joy_caption_alpha_one_jupyter.ipynb +199 -0
joy_caption_alpha_one_jupyter.ipynb ADDED
@@ -0,0 +1,199 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "VjYy0F2gZIPR"
+ },
+ "outputs": [],
+ "source": [
+ "!apt -y install -qq aria2\n",
+ "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n",
+ "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n",
+ "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n",
+ "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n",
+ "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n",
+ "\n",
+ "!pip install peft bitsandbytes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "EBNKXBwIkJLk"
+ },
+ "outputs": [],
+ "source": [
+ "from huggingface_hub import InferenceClient\n",
+ "from torch import nn\n",
+ "from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n",
+ "import torch\n",
+ "import torch.amp.autocast_mode\n",
+ "from PIL import Image\n",
+ "import os\n",
+ "import torchvision.transforms.functional as TVF\n",
+ "\n",
+ "CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n",
+ "MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B\"\n",
+ "CAPTION_TYPE_MAP = {\n",
+ " (\"descriptive\", \"formal\", False, False): [\"Write a descriptive caption for this image in a formal tone.\"],\n",
+ " (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n",
+ " (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n",
+ " (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n",
+ " (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n",
+ " (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n",
+ " (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n",
+ " (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n",
+ " (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n",
+ " (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n",
+ " (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n",
+ " (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n",
+ "}\n",
+ "\n",
+ "class ImageAdapter(nn.Module):\n",
+ "\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n",
+ "\t\tsuper().__init__()\n",
+ "\t\tself.deep_extract = deep_extract\n",
+ "\t\tif self.deep_extract:\n",
+ "\t\t\tinput_features = input_features * 5\n",
+ "\t\tself.linear1 = nn.Linear(input_features, output_features)\n",
+ "\t\tself.activation = nn.GELU()\n",
+ "\t\tself.linear2 = nn.Linear(output_features, output_features)\n",
+ "\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n",
+ "\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n",
+ "\t\tself.other_tokens = nn.Embedding(3, output_features)\n",
+ "\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n",
+ "\tdef forward(self, vision_outputs: torch.Tensor):\n",
+ "\t\tif self.deep_extract:\n",
+ "\t\t\tx = torch.concat((\n",
+ "\t\t\t\tvision_outputs[-2],\n",
+ "\t\t\t\tvision_outputs[3],\n",
+ "\t\t\t\tvision_outputs[7],\n",
+ "\t\t\t\tvision_outputs[13],\n",
+ "\t\t\t\tvision_outputs[20],\n",
+ "\t\t\t), dim=-1)\n",
+ "\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n",
+ "\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n",
+ "\t\telse:\n",
+ "\t\t\tx = vision_outputs[-2]\n",
+ "\t\tx = self.ln1(x)\n",
+ "\t\tif self.pos_emb is not None:\n",
+ "\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n",
+ "\t\t\tx = x + self.pos_emb\n",
+ "\t\tx = self.linear1(x)\n",
+ "\t\tx = self.activation(x)\n",
+ "\t\tx = self.linear2(x)\n",
+ "\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n",
+ "\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n",
+ "\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n",
+ "\t\treturn x\n",
+ "\tdef get_eot_embedding(self):\n",
+ "\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n",
+ "\n",
+ "clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n",
+ "clip_model = AutoModel.from_pretrained(CLIP_PATH)\n",
+ "clip_model = clip_model.vision_model\n",
+ "checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n",
+ "checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n",
+ "clip_model.load_state_dict(checkpoint)\n",
+ "# del checkpoint\n",
+ "clip_model.eval()\n",
+ "clip_model.requires_grad_(False)\n",
+ "clip_model.to(\"cuda\")\n",
+ "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n",
+ "assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n",
+ "text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
+ "text_model.load_adapter(\"/content/joy/text_model\")\n",
+ "text_model.eval()\n",
+ "image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n",
+ "image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n",
+ "image_adapter.eval()\n",
+ "image_adapter.to(\"cuda\")\n",
+ "\n",
+ "@torch.no_grad()\n",
+ "def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n",
+ " torch.cuda.empty_cache()\n",
+ " length = None if caption_length == \"any\" else caption_length\n",
+ " if isinstance(length, str):\n",
+ " try:\n",
+ " length = int(length)\n",
+ " except ValueError:\n",
+ " pass\n",
+ " if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n",
+ " caption_tone = \"formal\"\n",
+ " prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n",
+ " if prompt_key not in CAPTION_TYPE_MAP:\n",
+ " raise ValueError(f\"Invalid caption type: {prompt_key}\")\n",
+ " prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n",
+ " print(f\"Prompt: {prompt_str}\")\n",
+ " image = input_image.resize((384, 384), Image.LANCZOS)\n",
+ " pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n",
+ " pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n",
+ " pixel_values = pixel_values.to('cuda')\n",
+ " prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n",
+ " with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n",
+ " vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n",
+ " image_features = vision_outputs.hidden_states\n",
+ " embedded_images = image_adapter(image_features)\n",
+ " embedded_images = embedded_images.to('cuda')\n",
+ " prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n",
+ " assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n",
+ " embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n",
+ " eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n",
+ " inputs_embeds = torch.cat([\n",
+ " embedded_bos.expand(embedded_images.shape[0], -1, -1),\n",
+ " embedded_images.to(dtype=embedded_bos.dtype),\n",
+ " prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n",
+ " eot_embed.expand(embedded_images.shape[0], -1, -1),\n",
+ " ], dim=1)\n",
+ " input_ids = torch.cat([\n",
+ " torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n",
+ " torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n",
+ " prompt,\n",
+ " torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n",
+ " ], dim=1).to('cuda')\n",
+ " attention_mask = torch.ones_like(input_ids)\n",
+ " generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n",
+ " generate_ids = generate_ids[:, input_ids.shape[1]:]\n",
+ " if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n",
+ " generate_ids = generate_ids[:, :-1]\n",
+ " caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n",
+ " return caption.strip()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ldMOiaY7kJLp"
+ },
+ "outputs": [],
+ "source": [
+ "input_image = Image.open(\"/content/jmwknt.jpg\")\n",
+ "# caption type (descriptive/training_prompt/rng-tags)\n",
+ "# caption tone (formal/informal)\n",
+ "# caption length (any/very short/short/medium-length/long/very long or a specific number)\n",
+ "caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n",
+ "caption"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }
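
The notebook's final cell captions a single image. For reference, a minimal batch-captioning sketch in the same spirit: it assumes the two setup cells above have already been run (so stream_chat and the models are loaded) and uses a hypothetical /content/images folder for input; these names are illustrative, not part of the committed notebook.

from pathlib import Path
from PIL import Image

image_dir = Path("/content/images")  # hypothetical folder of images to caption
for path in sorted(image_dir.glob("*.jpg")):
    img = Image.open(path).convert("RGB")
    # Same arguments as the single-image call above: type, tone, length
    caption = stream_chat(img, "descriptive", "formal", "any")
    path.with_suffix(".txt").write_text(caption)  # save caption next to the image
    print(f"{path.name}: {caption[:80]}")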