codeShare committed
Commit 37a0f9c · verified · 1 Parent(s): e548504

Upload Joycaption_Alpha_One.ipynb

Files changed (1)
  1. Joycaption_Alpha_One.ipynb +1 -243
Joycaption_Alpha_One.ipynb CHANGED
@@ -1,243 +1 @@
- {
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "provenance": [],
- "gpuType": "T4"
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- },
- "language_info": {
- "name": "python"
- },
- "accelerator": "GPU"
- },
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Dwr7gk5OwuGC"
- },
- "outputs": [],
- "source": [
- "from google.colab import drive\n",
- "drive.mount('/content/drive')"
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "!apt -y install -qq aria2\n",
- "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n",
- "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n",
- "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n",
- "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n",
- "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n",
- "\n",
- "!pip install peft bitsandbytes\n",
- "from huggingface_hub import InferenceClient\n",
- "from torch import nn\n",
- "from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n",
- "import torch\n",
- "import torch.amp.autocast_mode\n",
- "from PIL import Image\n",
- "import os\n",
- "import torchvision.transforms.functional as TVF\n",
- "\n",
- "CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n",
- "MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B\"\n",
- "CAPTION_TYPE_MAP = {\n",
- " (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n",
- " (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n",
- " (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n",
- " (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n",
- " (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n",
- " (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n",
- " (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n",
- " (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n",
- " (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n",
- " (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n",
- " (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n",
- " (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n",
- "}\n",
- "\n",
- "class ImageAdapter(nn.Module):\n",
- "\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n",
- "\t\tsuper().__init__()\n",
- "\t\tself.deep_extract = deep_extract\n",
- "\t\tif self.deep_extract:\n",
- "\t\t\tinput_features = input_features * 5\n",
- "\t\tself.linear1 = nn.Linear(input_features, output_features)\n",
- "\t\tself.activation = nn.GELU()\n",
- "\t\tself.linear2 = nn.Linear(output_features, output_features)\n",
- "\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n",
- "\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n",
- "\t\tself.other_tokens = nn.Embedding(3, output_features)\n",
- "\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n",
- "\tdef forward(self, vision_outputs: torch.Tensor):\n",
- "\t\tif self.deep_extract:\n",
- "\t\t\tx = torch.concat((\n",
- "\t\t\t\tvision_outputs[-2],\n",
- "\t\t\t\tvision_outputs[3],\n",
- "\t\t\t\tvision_outputs[7],\n",
- "\t\t\t\tvision_outputs[13],\n",
- "\t\t\t\tvision_outputs[20],\n",
- "\t\t\t), dim=-1)\n",
- "\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n",
- "\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n",
- "\t\telse:\n",
- "\t\t\tx = vision_outputs[-2]\n",
- "\t\tx = self.ln1(x)\n",
- "\t\tif self.pos_emb is not None:\n",
- "\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n",
- "\t\t\tx = x + self.pos_emb\n",
- "\t\tx = self.linear1(x)\n",
- "\t\tx = self.activation(x)\n",
- "\t\tx = self.linear2(x)\n",
- "\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n",
- "\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n",
- "\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n",
- "\t\treturn x\n",
- "\tdef get_eot_embedding(self):\n",
- "\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n",
- "\n",
- "clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n",
- "clip_model = AutoModel.from_pretrained(CLIP_PATH)\n",
- "clip_model = clip_model.vision_model\n",
- "checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n",
- "checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n",
- "clip_model.load_state_dict(checkpoint)\n",
- "# del checkpoint\n",
- "clip_model.eval()\n",
- "clip_model.requires_grad_(False)\n",
- "clip_model.to(\"cuda\")\n",
- "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n",
- "assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n",
- "text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
- "text_model.load_adapter(\"/content/joy/text_model\")\n",
- "text_model.eval()\n",
- "image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n",
- "image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n",
- "image_adapter.eval()\n",
- "image_adapter.to(\"cuda\")\n",
- "\n",
- "@torch.no_grad()\n",
- "def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n",
- " torch.cuda.empty_cache()\n",
- " length = None if caption_length == \"any\" else caption_length\n",
- " if isinstance(length, str):\n",
- " try:\n",
- " length = int(length)\n",
- " except ValueError:\n",
- " pass\n",
- " if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n",
- " caption_tone = \"formal\"\n",
- " prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n",
- " if prompt_key not in CAPTION_TYPE_MAP:\n",
- " raise ValueError(f\"Invalid caption type: {prompt_key}\")\n",
- " prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n",
- " print(f\"Prompt: {prompt_str}\")\n",
- " image = input_image.resize((384, 384), Image.LANCZOS)\n",
- " pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n",
- " pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n",
- " pixel_values = pixel_values.to('cuda')\n",
- " prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n",
- " with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n",
- " vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n",
- " image_features = vision_outputs.hidden_states\n",
- " embedded_images = image_adapter(image_features)\n",
- " embedded_images = embedded_images.to('cuda')\n",
- " prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n",
- " assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n",
- " embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n",
- " eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n",
- " inputs_embeds = torch.cat([\n",
- " embedded_bos.expand(embedded_images.shape[0], -1, -1),\n",
- " embedded_images.to(dtype=embedded_bos.dtype),\n",
- " prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n",
- " eot_embed.expand(embedded_images.shape[0], -1, -1),\n",
- " ], dim=1)\n",
- " input_ids = torch.cat([\n",
- " torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n",
- " torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n",
- " prompt,\n",
- " torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n",
- " ], dim=1).to('cuda')\n",
- " attention_mask = torch.ones_like(input_ids)\n",
- " generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n",
- " generate_ids = generate_ids[:, input_ids.shape[1]:]\n",
- " if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n",
- " generate_ids = generate_ids[:, :-1]\n",
- " caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n",
- " caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n",
- " return caption"
- ],
- "metadata": {
- "id": "0zaheBIsw_dc"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "import os\n",
- "from PIL import Image\n",
- "home_directory = '/content/'\n",
- "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
- "if using_Kaggle : home_directory = '/kaggle/working/'\n",
- "%cd {home_directory}\n",
- "\n",
- "def my_mkdirs(folder):\n",
- " if os.path.exists(folder)==False:\n",
- " os.makedirs(folder)\n",
- "\n",
- "\n",
- "tgt_folder = f'/content/drive/MyDrive/tmp/'\n",
- "my_mkdirs(f'{tgt_folder}')\n",
- "\n",
- "\n",
- "src_folder = '/content/drive/MyDrive/wild party/'\n",
- "suffixes = ['.png', '.jpeg' , '.webp' , '.jpg']\n",
- "num = 1\n",
- "for filename in os.listdir(src_folder):\n",
- " for suffix in suffixes:\n",
- " if not filename.find(suffix)>-1: continue\n",
- " print(filename)\n",
- " %cd {src_folder}\n",
- " input_image = Image.open(f\"{filename}\").convert('RGB')\n",
- " caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n",
- " print(f\"...\\n\\n...caption for {filename}.{suffix}\\n\\n...\")\n",
- " print(caption)\n",
- " #---------#\n",
- " %cd {tgt_folder}\n",
- " f = open(f\"{num}.txt\", \"w\")\n",
- " f.write(f'{caption}')\n",
- " f.close()\n",
- " input_image.save(f'{num}.png', \"PNG\")\n",
- " num = num+1"
- ],
- "metadata": {
- "id": "J811UZU6xZEo"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "source": [
- "from google.colab import runtime\n",
- "runtime.unassign()"
- ],
- "metadata": {
- "id": "kM4TpfdB1amt"
- },
- "execution_count": null,
- "outputs": []
- }
- ]
- }
 
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735064367}],"gpuType":"T4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"b89006df8aee42ecaf385b89e5df4acf":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_58970cc506754327b9c1c2586b5519f5","IPY_MODEL_648b1c4e67084eecbc207c65bccaff9c","IPY_MODEL_2a2bb4f2290d4df1963c54f9150c7870"],"layout":"IPY_MODEL_481634b2c06f4d6f9645e86f2cf0f3c3"}},"58970cc506754327b9c1c2586b5519f5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_616bab3bd7f145d79412a8dc2cd97af7","placeholder":"​","style":"IPY_MODEL_c8e0e598b7724db0893f96298b51b652","value":"config.json: 100%"}},"648b1c4e67084eecbc207c65bccaff9c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_055de6f70357469d85e4c8695705ed7f","max":1523,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5fc17e37d17049a5bbae17151a6ac0ab","value":1523}},"2a2bb4f2290d4df1963c54f9150c7870":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a742183fb23241db8c717bffc88ad11c","placeholder":"​","style":"IPY_MODEL_6d6a8e3473d74352b77056f07c812c61","value":" 1.52k/1.52k [00:00&lt;00:00, 
92.3kB/s]"}},"481634b2c06f4d6f9645e86f2cf0f3c3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"616bab3bd7f145d79412a8dc2cd97af7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c8e0e598b7724db0893f96298b51b652":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"055de6f70357469d85e4c8695705ed7f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5fc17e37d17049a5bb
ae17151a6ac0ab":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a742183fb23241db8c717bffc88ad11c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6d6a8e3473d74352b77056f07c812c61":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2153314e67304a77b518a65ca9c34a4d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_37ec7b26f5f54b65962b896d156e27e1","IPY_MODEL_03eba632f74744aaadc30435a4766d9e","IPY_MODEL_4f64e9ed0ac2497fa7dfa5ff73a1d2bc"],"layout":"IPY_MODEL_3d989f76f97142e99152327b3a1a91a4"}},"37ec7b26f5f54b65962b896d156e27e1":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_082fbc7e17134b7a8ebfb1a0f04e1933","placeholder":"​","style":"IPY_MODEL_fe328b7afea347aebc8beef40122fd3a","value":"model.safetensors:  
27%"}},"03eba632f74744aaadc30435a4766d9e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"","description":"","description_tooltip":null,"layout":"IPY_MODEL_6a6b435daed54dafad714bd4c5577f1a","max":5702746390,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e2b39602d3ea47fa9b1b38470386bbba","value":1562378240}},"4f64e9ed0ac2497fa7dfa5ff73a1d2bc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a43970bd01864771abbea368623242b9","placeholder":"​","style":"IPY_MODEL_5637508040014a1cab3274eebe522670","value":" 1.56G/5.70G [00:36&lt;01:39, 41.7MB/s]"}},"3d989f76f97142e99152327b3a1a91a4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"082fbc7e17134b7a8ebfb1a0f04e1933":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fe328b7afea347aebc8beef40122fd3a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5
.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6a6b435daed54dafad714bd4c5577f1a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e2b39602d3ea47fa9b1b38470386bbba":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a43970bd01864771abbea368623242b9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5637508040014a1cab3274eebe522670":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"cells":[{"cell_type":"code","execution_count":1,"metadata":{"id":"Dwr7gk5OwuGC","executionInfo":{"status":"ok","timestamp":1739734102932,"user_tz":-60,"elapsed":18853,"user":{"displayName":"","userId":""}},"outputId":"4178d27e-4276-4d40-98d8-b0d1428ddef7","colab":{"base_uri":"https://localhost:8080/"}},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import 
drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!pip install peft bitsandbytes\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 
else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer=''\n","#tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n","tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n"," torch.cuda.empty_cache()\n"," length = None if caption_length == \"any\" else caption_length\n"," if isinstance(length, str):\n"," try:\n"," length = int(length)\n"," except ValueError:\n"," pass\n"," if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," caption_tone = \"formal\"\n"," prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," if prompt_key not in 
CAPTION_TYPE_MAP:\n"," raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return 
caption"],"metadata":{"id":"0zaheBIsw_dc","outputId":"d12f0052-ac0d-4c22-93cc-b5f5bac31f26","colab":{"base_uri":"https://localhost:8080/","height":893,"referenced_widgets":["b89006df8aee42ecaf385b89e5df4acf","58970cc506754327b9c1c2586b5519f5","648b1c4e67084eecbc207c65bccaff9c","2a2bb4f2290d4df1963c54f9150c7870","481634b2c06f4d6f9645e86f2cf0f3c3","616bab3bd7f145d79412a8dc2cd97af7","c8e0e598b7724db0893f96298b51b652","055de6f70357469d85e4c8695705ed7f","5fc17e37d17049a5bbae17151a6ac0ab","a742183fb23241db8c717bffc88ad11c","6d6a8e3473d74352b77056f07c812c61","2153314e67304a77b518a65ca9c34a4d","37ec7b26f5f54b65962b896d156e27e1","03eba632f74744aaadc30435a4766d9e","4f64e9ed0ac2497fa7dfa5ff73a1d2bc","3d989f76f97142e99152327b3a1a91a4","082fbc7e17134b7a8ebfb1a0f04e1933","fe328b7afea347aebc8beef40122fd3a","6a6b435daed54dafad714bd4c5577f1a","e2b39602d3ea47fa9b1b38470386bbba","a43970bd01864771abbea368623242b9","5637508040014a1cab3274eebe522670"]}},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["aria2 is already the newest version (1.36.0-1).\n","0 upgraded, 0 newly installed, 0 to remove and 20 not upgraded.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","c14bf0|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/text_model/adapter_config.json\n","\n","Status Legend:\n","(OK):download completed.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","1ca36c|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/text_model/adapter_model.safetensors\n","\n","Status Legend:\n","(OK):download completed.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","643a5c|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/clip_model.pt\n","\n","Status Legend:\n","(OK):download completed.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","fe604f|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/config.yaml\n","\n","Status Legend:\n","(OK):download completed.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","1f6b74|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/image_adapter.pt\n","\n","Status Legend:\n","(OK):download completed.\n","Requirement already satisfied: peft in /usr/local/lib/python3.11/dist-packages (0.14.0)\n","Requirement already satisfied: bitsandbytes in /usr/local/lib/python3.11/dist-packages (0.45.2)\n","Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.11/dist-packages (from peft) (1.26.4)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from peft) (24.2)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.11/dist-packages (from peft) (5.9.5)\n","Requirement already satisfied: pyyaml in /usr/local/lib/python3.11/dist-packages (from peft) (6.0.2)\n","Requirement already satisfied: torch>=1.13.0 in /usr/local/lib/python3.11/dist-packages (from peft) (2.5.1+cu124)\n","Requirement already satisfied: transformers in /usr/local/lib/python3.11/dist-packages (from peft) (4.48.3)\n","Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from peft) (4.67.1)\n","Requirement already satisfied: accelerate>=0.21.0 in 
/usr/local/lib/python3.11/dist-packages (from peft) (1.3.0)\n","Requirement already satisfied: safetensors in /usr/local/lib/python3.11/dist-packages (from peft) (0.5.2)\n","Requirement already satisfied: huggingface-hub>=0.25.0 in /usr/local/lib/python3.11/dist-packages (from peft) (0.28.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (3.17.0)\n","Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (2024.10.0)\n","Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (2.32.3)\n","Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.25.0->peft) (4.12.2)\n","Requirement already satisfied: networkx in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.4.2)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.1.5)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (9.1.0.70)\n","Requirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.5.8)\n","Requirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (11.2.1.3)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (10.3.5.147)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (11.6.1.9)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.3.1.170)\n","Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (2.21.5)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (12.4.127)\n","Requirement already satisfied: triton==3.1.0 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (3.1.0)\n","Requirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.11/dist-packages (from torch>=1.13.0->peft) (1.13.1)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy==1.13.1->torch>=1.13.0->peft) (1.3.0)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from transformers->peft) (2024.11.6)\n","Requirement already satisfied: tokenizers<0.22,>=0.21 in /usr/local/lib/python3.11/dist-packages (from transformers->peft) (0.21.0)\n","Requirement already 
satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from jinja2->torch>=1.13.0->peft) (3.0.2)\n","Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (3.4.1)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (3.10)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (2.3.0)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests->huggingface-hub>=0.25.0->peft) (2025.1.31)\n"]},{"output_type":"stream","name":"stderr","text":["<ipython-input-7-250a868fcd30>:78: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n"," checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n"]},{"output_type":"display_data","data":{"text/plain":["config.json: 0%| | 0.00/1.52k [00:00<?, ?B/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"b89006df8aee42ecaf385b89e5df4acf"}},"metadata":{}},{"output_type":"stream","name":"stderr","text":["The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead.\n","Unused kwargs: ['_load_in_4bit', '_load_in_8bit', 'quant_method']. These kwargs are not used in <class 'transformers.utils.quantization_config.BitsAndBytesConfig'>.\n","/usr/local/lib/python3.11/dist-packages/transformers/quantizers/auto.py:195: UserWarning: You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading already has a `quantization_config` attribute. 
The `quantization_config` from the model will be used.\n"," warnings.warn(warning_msg)\n"]},{"output_type":"display_data","data":{"text/plain":["model.safetensors: 0%| | 0.00/5.70G [00:00<?, ?B/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2153314e67304a77b518a65ca9c34a4d"}},"metadata":{}}]},{"cell_type":"code","source":["import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," print(filename)\n"," %cd {src_folder}\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," print(f\"...\\n\\n...caption for {filename}.{suffix}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.png', \"PNG\")\n"," num = num+1"],"metadata":{"id":"J811UZU6xZEo"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import shutil\n","shutil.make_archive('nsfw_captions', format='zip', root_dir=f'{tgt_folder}')\n","\n","\n","\n","\n","\n"],"metadata":{"id":"5EztLCjkPq4U"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from google.colab import runtime\n","#runtime.unassign()"],"metadata":{"id":"kM4TpfdB1amt"},"execution_count":null,"outputs":[]}]}
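Note on the stderr captured in the new cell output above: recent transformers releases deprecate passing load_in_8bit=True directly to from_pretrained in favour of a BitsAndBytesConfig object. The following is a minimal sketch of the equivalent loading call, reusing the model and adapter paths from the notebook; it is not part of the commit, and it assumes peft and bitsandbytes are installed as in the first cell.

    # Sketch only: same model/adapter paths as the notebook cell, with quantization
    # passed via BitsAndBytesConfig instead of the deprecated load_in_8bit argument.
    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    MODEL_PATH = "unsloth/Meta-Llama-3.1-8B"  # path used in the removed version of the cell

    text_model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )
    text_model.load_adapter("/content/joy/text_model")  # LoRA adapter downloaded with aria2c
    text_model.eval()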