MegaTronX committed
Commit 094d470 · verified · 1 Parent(s): 4aff262

Update joycaption.py

Files changed (1)
  1. joycaption.py +485 -485
joycaption.py CHANGED
@@ -1,485 +1,485 @@
1
- import os
2
- if os.environ.get("SPACES_ZERO_GPU") is not None:
3
- import spaces
4
- else:
5
- class spaces:
6
- @staticmethod
7
- def GPU(func):
8
- def wrapper(*args, **kwargs):
9
- return func(*args, **kwargs)
10
- return wrapper
11
- import gradio as gr
12
- from huggingface_hub import InferenceClient, HfApi
13
- from torch import nn
14
- from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM, LlavaForConditionalGeneration
15
- from pathlib import Path
16
- import torch
17
- import torch.amp.autocast_mode
18
- from PIL import Image
19
- import torchvision.transforms.functional as TVF
20
- import gc
21
- from peft import PeftModel
22
- from typing import Union
23
-
24
- LOAD_IN_NF4 = True
25
-
26
- if os.environ.get("SPACES_ZERO_GPU") is not None:
27
- import subprocess
28
- LOAD_IN_NF4 = False # If true, Custom VLM LoRA doesn't work initially. The rest are fine.
29
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
30
-
31
- BASE_DIR = Path(__file__).resolve().parent # Define the base directory
32
- device = "cuda" if torch.cuda.is_available() else "cpu"
33
- HF_TOKEN = os.environ.get("HF_TOKEN", None)
34
- use_inference_client = False
35
- PIXTRAL_PATHS = ["SeanScripts/pixtral-12b-nf4", "mistral-community/pixtral-12b"]
36
-
37
- llm_models = {
38
- "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2": None,
39
- #PIXTRAL_PATHS[0]: None,
40
- "bunnycore/LLama-3.1-8B-Matrix": None,
41
- "Sao10K/Llama-3.1-8B-Stheno-v3.4": None,
42
- "unsloth/Meta-Llama-3.1-8B-bnb-4bit": None,
43
- "DevQuasar/HermesNova-Llama-3.1-8B": None,
44
- "mergekit-community/L3.1-Boshima-b-FIX": None,
45
- #"chuanli11/Llama-3.2-3B-Instruct-uncensored": None, # Error(s) in loading state_dict for ImageAdapter:\n\tsize mismatch for linear1.weight: copying a param with shape torch.Size([4096, 1152]) from checkpoint, the shape in current model is torch.Size([3072, 1152]).\n\tsize mismatch for linear1.bias: copying a param with shape torch.Size([4096]) from checkpoint,
46
- "unsloth/Meta-Llama-3.1-8B-Instruct": None,
47
- }
48
-
49
- CLIP_PATH = "google/siglip-so400m-patch14-384"
50
- MODEL_PATH = list(llm_models.keys())[0]
51
- CHECKPOINT_PATH = BASE_DIR / Path("cgrkzexw-599808")
52
- LORA_PATH = CHECKPOINT_PATH / "text_model"
53
- TITLE = "<h1><center>JoyCaption Alpha Two (2024-09-26a)</center></h1>"
54
- CAPTION_TYPE_MAP = {
55
- "Descriptive": [
56
- "Write a descriptive caption for this image in a formal tone.",
57
- "Write a descriptive caption for this image in a formal tone within {word_count} words.",
58
- "Write a {length} descriptive caption for this image in a formal tone.",
59
- ],
60
- "Descriptive (Informal)": [
61
- "Write a descriptive caption for this image in a casual tone.",
62
- "Write a descriptive caption for this image in a casual tone within {word_count} words.",
63
- "Write a {length} descriptive caption for this image in a casual tone.",
64
- ],
65
- "Training Prompt": [
66
- "Write a stable diffusion prompt for this image.",
67
- "Write a stable diffusion prompt for this image within {word_count} words.",
68
- "Write a {length} stable diffusion prompt for this image.",
69
- ],
70
- "MidJourney": [
71
- "Write a MidJourney prompt for this image.",
72
- "Write a MidJourney prompt for this image within {word_count} words.",
73
- "Write a {length} MidJourney prompt for this image.",
74
- ],
75
- "Booru tag list": [
76
- "Write a list of Booru tags for this image.",
77
- "Write a list of Booru tags for this image within {word_count} words.",
78
- "Write a {length} list of Booru tags for this image.",
79
- ],
80
- "Booru-like tag list": [
81
- "Write a list of Booru-like tags for this image.",
82
- "Write a list of Booru-like tags for this image within {word_count} words.",
83
- "Write a {length} list of Booru-like tags for this image.",
84
- ],
85
- "Art Critic": [
86
- "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc.",
87
- "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it within {word_count} words.",
88
- "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it {length}.",
89
- ],
90
- "Product Listing": [
91
- "Write a caption for this image as though it were a product listing.",
92
- "Write a caption for this image as though it were a product listing. Keep it under {word_count} words.",
93
- "Write a {length} caption for this image as though it were a product listing.",
94
- ],
95
- "Social Media Post": [
96
- "Write a caption for this image as if it were being used for a social media post.",
97
- "Write a caption for this image as if it were being used for a social media post. Limit the caption to {word_count} words.",
98
- "Write a {length} caption for this image as if it were being used for a social media post.",
99
- ],
100
- }
101
-
102
- class ImageAdapter(nn.Module):
103
- def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
104
- super().__init__()
105
- self.deep_extract = deep_extract
106
-
107
- if self.deep_extract:
108
- input_features = input_features * 5
109
-
110
- self.linear1 = nn.Linear(input_features, output_features)
111
- self.activation = nn.GELU()
112
- self.linear2 = nn.Linear(output_features, output_features)
113
- self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
114
- self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))
115
-
116
- # Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
117
- self.other_tokens = nn.Embedding(3, output_features)
118
- self.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3
119
-
120
- def forward(self, vision_outputs: torch.Tensor):
121
- if self.deep_extract:
122
- x = torch.concat((
123
- vision_outputs[-2],
124
- vision_outputs[3],
125
- vision_outputs[7],
126
- vision_outputs[13],
127
- vision_outputs[20],
128
- ), dim=-1)
129
- assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}" # batch, tokens, features
130
- assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
131
- else:
132
- x = vision_outputs[-2]
133
-
134
- x = self.ln1(x)
135
-
136
- if self.pos_emb is not None:
137
- assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
138
- x = x + self.pos_emb
139
-
140
- x = self.linear1(x)
141
- x = self.activation(x)
142
- x = self.linear2(x)
143
-
144
- # <|image_start|>, IMAGE, <|image_end|>
145
- other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
146
- assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
147
- x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)
148
-
149
- return x
150
-
151
- def get_eot_embedding(self):
152
- return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
153
-
154
- # https://huggingface.co/docs/transformers/v4.44.2/gguf
155
- # https://github.com/city96/ComfyUI-GGUF/issues/7
156
- # https://github.com/THUDM/ChatGLM-6B/issues/18
157
- # https://github.com/meta-llama/llama/issues/394
158
- # https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/discussions/109
159
- # https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
160
- # https://huggingface.co/google/flan-ul2/discussions/8
161
- # https://huggingface.co/blog/4bit-transformers-bitsandbytes
162
- # https://huggingface.co/docs/transformers/main/en/peft
163
- # https://huggingface.co/docs/transformers/main/en/peft#enable-and-disable-adapters
164
- # https://huggingface.co/docs/transformers/main/quantization/bitsandbytes?bnb=4-bit
165
- # https://huggingface.co/lllyasviel/flux1-dev-bnb-nf4
166
- # https://github.com/huggingface/transformers/issues/28515
167
- # https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
168
- tokenizer = None
169
- text_model_client = None
170
- text_model = None
171
- image_adapter = None
172
- pixtral_model = None
173
- pixtral_processor = None
174
- def load_text_model(model_name: str=MODEL_PATH, gguf_file: Union[str, None]=None, is_nf4: bool=True, is_lora: bool=True):
175
- global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
176
- try:
177
- tokenizer = None
178
- text_model_client = None
179
- text_model = None
180
- image_adapter = None
181
- pixtral_model = None
182
- pixtral_processor = None
183
- torch.cuda.empty_cache()
184
- gc.collect()
185
- lora_device = "auto"
186
-
187
- from transformers import BitsAndBytesConfig
188
- nf4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4",
189
- bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
190
-
191
- if model_name in PIXTRAL_PATHS: # Pixtral
192
- print(f"Loading LLM: {model_name}")
193
- if is_nf4:
194
- pixtral_model = LlavaForConditionalGeneration.from_pretrained(model_name, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
195
- else:
196
- pixtral_model = LlavaForConditionalGeneration.from_pretrained(model_name, device_map=device, torch_dtype=torch.bfloat16).eval()
197
- pixtral_processor = AutoProcessor.from_pretrained(model_name)
198
- print(f"pixtral_model: {type(pixtral_model)}") #
199
- print(f"pixtral_processor: {type(pixtral_processor)}") #
200
- return
201
-
202
- print("Loading tokenizer")
203
- tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH / "text_model", use_fast=True)
204
- assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f"Tokenizer is of type {type(tokenizer)}"
205
-
206
- print(f"Loading LLM: {model_name}")
207
- if gguf_file:
208
- if device == "cpu":
209
- text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=device, torch_dtype=torch.bfloat16).eval()
210
- elif is_nf4:
211
- text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
212
- else:
213
- text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=lora_device, torch_dtype=torch.bfloat16).eval()
214
- else:
215
- if device == "cpu":
216
- text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=device, torch_dtype=torch.bfloat16).eval()
217
- elif is_nf4:
218
- text_model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
219
- else:
220
- text_model = AutoModelForCausalLM.from_pretrained(model_name, device_map=lora_device, torch_dtype=torch.bfloat16).eval()
221
-
222
- if is_lora and LORA_PATH.exists() and not is_nf4:
223
- print("Loading VLM's custom text model")
224
- if is_nf4: # omitted
225
- text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device, quantization_config=nf4_config)
226
- else:
227
- text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device)
228
- text_model = text_model.merge_and_unload(safe_merge=True) # to avoid PEFT bug https://github.com/huggingface/transformers/issues/28515
229
- else: print("VLM's custom text model is not loaded")
230
-
231
- print("Loading image adapter")
232
- image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False).eval().to("cpu")
233
- image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu", weights_only=False))
234
- image_adapter.eval().to(device)
235
- except Exception as e:
236
- print(f"LLM load error: {e}")
237
- raise Exception(f"LLM load error: {e}") from e
238
- finally:
239
- torch.cuda.empty_cache()
240
- gc.collect()
241
-
242
- load_text_model.zerogpu = True
243
-
244
- # Load CLIP
245
- print("Loading CLIP")
246
- clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
247
- clip_model = AutoModel.from_pretrained(CLIP_PATH).vision_model
248
- assert (CHECKPOINT_PATH / "clip_model.pt").exists()
249
- if (CHECKPOINT_PATH / "clip_model.pt").exists():
250
- print("Loading VLM's custom vision model")
251
- checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu', weights_only=False)
252
- checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
253
- clip_model.load_state_dict(checkpoint)
254
- del checkpoint
255
- clip_model.eval().requires_grad_(False).to(device)
256
-
257
- # Tokenizer
258
- # LLM
259
- # Image Adapter
260
- #load_text_model(PIXTRAL_PATHS[0])
261
- #print(f"pixtral_model: {type(pixtral_model)}") #
262
- #print(f"pixtral_processor: {type(pixtral_processor)}") #
263
- load_text_model(MODEL_PATH, None, LOAD_IN_NF4, True)
264
- #print(f"pixtral_model: {type(pixtral_model)}") #
265
- #print(f"pixtral_processor: {type(pixtral_processor)}") #
266
-
267
- @spaces.GPU
268
- @torch.inference_mode()
269
- def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str], name_input: str, custom_prompt: str,
270
- max_new_tokens: int=300, top_p: float=0.9, temperature: float=0.6, model_name: str=MODEL_PATH, progress=gr.Progress(track_tqdm=True)) -> tuple[str, str]:
271
- global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
272
- torch.cuda.empty_cache()
273
- gc.collect()
274
-
275
- # 'any' means no length specified
276
- length = None if caption_length == "any" else caption_length
277
-
278
- if isinstance(length, str):
279
- try:
280
- length = int(length)
281
- except ValueError:
282
- pass
283
-
284
- # Build prompt
285
- if length is None:
286
- map_idx = 0
287
- elif isinstance(length, int):
288
- map_idx = 1
289
- elif isinstance(length, str):
290
- map_idx = 2
291
- else:
292
- raise ValueError(f"Invalid caption length: {length}")
293
-
294
- prompt_str = CAPTION_TYPE_MAP[caption_type][map_idx]
295
-
296
- # Add extra options
297
- if len(extra_options) > 0:
298
- prompt_str += " " + " ".join(extra_options)
299
-
300
- # Add name, length, word_count
301
- prompt_str = prompt_str.format(name=name_input, length=caption_length, word_count=caption_length)
302
-
303
- if custom_prompt.strip() != "":
304
- prompt_str = custom_prompt.strip()
305
-
306
- # For debugging
307
- print(f"Prompt: {prompt_str}")
308
-
309
- # Pixtral
310
- if model_name in PIXTRAL_PATHS:
311
- print(f"pixtral_model: {type(pixtral_model)}") #
312
- print(f"pixtral_processor: {type(pixtral_processor)}") #
313
- input_images = [input_image.convert("RGB")]
314
- input_prompt = "[INST]Caption this image:\n[IMG][/INST]"
315
- inputs = pixtral_processor(images=input_images, text=input_prompt, return_tensors="pt").to(device)
316
- generate_ids = pixtral_model.generate(**inputs, max_new_tokens=max_new_tokens)
317
- output = pixtral_processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
318
- return input_prompt, output.strip()
319
-
320
- # Preprocess image
321
- # NOTE: I found the default processor for so400M to have worse results than just using PIL directly
322
- #image = clip_processor(images=input_image, return_tensors='pt').pixel_values
323
- image = input_image.resize((384, 384), Image.LANCZOS)
324
- pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
325
- pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
326
- pixel_values = pixel_values.to(device)
327
-
328
- # Embed image
329
- # This results in Batch x Image Tokens x Features
330
- with torch.amp.autocast_mode.autocast(device, enabled=True):
331
- vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
332
- image_features = vision_outputs.hidden_states
333
- embedded_images = image_adapter(image_features)
334
- embedded_images = embedded_images.to(device)
335
-
336
- # Build the conversation
337
- convo = [
338
- {
339
- "role": "system",
340
- "content": "You are a helpful image captioner.",
341
- },
342
- {
343
- "role": "user",
344
- "content": prompt_str,
345
- },
346
- ]
347
-
348
- # Format the conversation
349
- convo_string = tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = True)
350
- assert isinstance(convo_string, str)
351
-
352
- # Tokenize the conversation
353
- # prompt_str is tokenized separately so we can do the calculations below
354
- convo_tokens = tokenizer.encode(convo_string, return_tensors="pt", add_special_tokens=False, truncation=False)
355
- prompt_tokens = tokenizer.encode(prompt_str, return_tensors="pt", add_special_tokens=False, truncation=False)
356
- assert isinstance(convo_tokens, torch.Tensor) and isinstance(prompt_tokens, torch.Tensor)
357
- convo_tokens = convo_tokens.squeeze(0) # Squeeze just to make the following easier
358
- prompt_tokens = prompt_tokens.squeeze(0)
359
-
360
- # Calculate where to inject the image
361
- eot_id_indices = (convo_tokens == tokenizer.convert_tokens_to_ids("<|eot_id|>")).nonzero(as_tuple=True)[0].tolist()
362
- assert len(eot_id_indices) == 2, f"Expected 2 <|eot_id|> tokens, got {len(eot_id_indices)}"
363
-
364
- preamble_len = eot_id_indices[1] - prompt_tokens.shape[0] # Number of tokens before the prompt
365
-
366
- # Embed the tokens
367
- convo_embeds = text_model.model.embed_tokens(convo_tokens.unsqueeze(0).to(device))
368
-
369
- # Construct the input
370
- input_embeds = torch.cat([
371
- convo_embeds[:, :preamble_len], # Part before the prompt
372
- embedded_images.to(dtype=convo_embeds.dtype), # Image
373
- convo_embeds[:, preamble_len:], # The prompt and anything after it
374
- ], dim=1).to(device)
375
-
376
- input_ids = torch.cat([
377
- convo_tokens[:preamble_len].unsqueeze(0),
378
- torch.zeros((1, embedded_images.shape[1]), dtype=torch.long), # Dummy tokens for the image (TODO: Should probably use a special token here so as not to confuse any generation algorithms that might be inspecting the input)
379
- convo_tokens[preamble_len:].unsqueeze(0),
380
- ], dim=1).to(device)
381
- attention_mask = torch.ones_like(input_ids)
382
-
383
- # Debugging
384
- #print(f"Input to model: {repr(tokenizer.decode(input_ids[0]))}")
385
-
386
- text_model.to(device)
387
- generate_ids = text_model.generate(input_ids, inputs_embeds=input_embeds, attention_mask=attention_mask, max_new_tokens=max_new_tokens,
388
- do_sample=True, suppress_tokens=None, top_p=top_p, temperature=temperature)
389
-
390
- # Trim off the prompt
391
- generate_ids = generate_ids[:, input_ids.shape[1]:]
392
- if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
393
- generate_ids = generate_ids[:, :-1]
394
-
395
- caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
396
-
397
- return prompt_str, caption.strip()
398
-
399
-
400
- # https://huggingface.co/docs/transformers/v4.44.2/main_classes/text_generation#transformers.FlaxGenerationMixin.generate
401
- # https://github.com/huggingface/transformers/issues/6535
402
- # https://zenn.dev/hijikix/articles/8c445f4373fdcc ja
403
- # https://github.com/ggerganov/llama.cpp/discussions/7712
404
- # https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility
405
- # https://huggingface.co/docs/huggingface_hub/v0.24.6/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation
406
-
407
-
408
- def is_repo_name(s):
409
- import re
410
- return re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', s)
411
-
412
-
413
- def is_repo_exists(repo_id):
414
- try:
415
- api = HfApi(token=HF_TOKEN)
416
- if api.repo_exists(repo_id=repo_id): return True
417
- else: return False
418
- except Exception as e:
419
- print(f"Error: Failed to connect {repo_id}. {e}")
420
- return True # for safe
421
-
422
-
423
- def is_valid_repo(repo_id):
424
- import re
425
- try:
426
- if not re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', repo_id): return False
427
- api = HfApi()
428
- if api.repo_exists(repo_id=repo_id): return True
429
- else: return False
430
- except Exception as e:
431
- print(f"Failed to connect {repo_id}. {e}")
432
- return False
433
-
434
-
435
- def get_text_model():
436
- return list(llm_models.keys())
437
-
438
-
439
- def is_gguf_repo(repo_id: str):
440
- try:
441
- api = HfApi(token=HF_TOKEN)
442
- if not is_repo_name(repo_id) or not is_repo_exists(repo_id): return False
443
- files = api.list_repo_files(repo_id=repo_id)
444
- except Exception as e:
445
- print(f"Error: Failed to get {repo_id}'s info. {e}")
446
- gr.Warning(f"Error: Failed to get {repo_id}'s info. {e}")
447
- return False
448
- files = [f for f in files if f.endswith(".gguf")]
449
- if len(files) == 0: return False
450
- else: return True
451
-
452
-
453
- def get_repo_gguf(repo_id: str):
454
- try:
455
- api = HfApi(token=HF_TOKEN)
456
- if not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(value="", choices=[])
457
- files = api.list_repo_files(repo_id=repo_id)
458
- except Exception as e:
459
- print(f"Error: Failed to get {repo_id}'s info. {e}")
460
- gr.Warning(f"Error: Failed to get {repo_id}'s info. {e}")
461
- return gr.update(value="", choices=[])
462
- files = [f for f in files if f.endswith(".gguf")]
463
- if len(files) == 0: return gr.update(value="", choices=[])
464
- else: return gr.update(value=files[0], choices=files)
465
-
466
-
467
- @spaces.GPU
468
- def change_text_model(model_name: str=MODEL_PATH, use_client: bool=False, gguf_file: Union[str, None]=None,
469
- is_nf4: bool=True, is_lora: bool=True, progress=gr.Progress(track_tqdm=True)):
470
- global use_inference_client, llm_models
471
- use_inference_client = use_client
472
- try:
473
- if not is_repo_name(model_name) or not is_repo_exists(model_name):
474
- raise gr.Error(f"Repo doesn't exist: {model_name}")
475
- if not gguf_file and is_gguf_repo(model_name):
476
- gr.Info(f"Please select a gguf file.")
477
- return gr.update(visible=True)
478
- if use_inference_client:
479
- pass #
480
- else:
481
- load_text_model(model_name, gguf_file, is_nf4, is_lora)
482
- if model_name not in llm_models: llm_models[model_name] = gguf_file if gguf_file else None
483
- return gr.update(choices=get_text_model())
484
- except Exception as e:
485
- raise gr.Error(f"Model load error: {model_name}, {e}")
 
1
+ import os
2
+ if os.environ.get("SPACES_ZERO_GPU") is not None:
3
+ import spaces
4
+ else:
5
+ class spaces:
6
+ @staticmethod
7
+ def GPU(func):
8
+ def wrapper(*args, **kwargs):
9
+ return func(*args, **kwargs)
10
+ return wrapper
11
+ import gradio as gr
12
+ from huggingface_hub import InferenceClient, HfApi
13
+ from torch import nn
14
+ from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM, LlavaForConditionalGeneration
15
+ from pathlib import Path
16
+ import torch
17
+ import torch.amp.autocast_mode
18
+ from PIL import Image
19
+ import torchvision.transforms.functional as TVF
20
+ import gc
21
+ from peft import PeftModel
22
+ from typing import Union
23
+
24
+ LOAD_IN_NF4 = True
25
+
26
+ if os.environ.get("SPACES_ZERO_GPU") is not None:
27
+ import subprocess
28
+ LOAD_IN_NF4 = False # If true, Custom VLM LoRA doesn't work initially. The rest are fine.
29
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
30
+
31
+ BASE_DIR = Path(__file__).resolve().parent # Define the base directory
32
+ device = "cuda" if torch.cuda.is_available() else "cpu"
33
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
34
+ use_inference_client = False
35
+ PIXTRAL_PATHS = ["SeanScripts/pixtral-12b-nf4", "mistral-community/pixtral-12b"]
36
+
37
+ llm_models = {
38
+ "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2": None,
39
+ #PIXTRAL_PATHS[0]: None,
40
+ "bunnycore/LLama-3.1-8B-Matrix": None,
41
+ "Sao10K/Llama-3.1-8B-Stheno-v3.4": None,
42
+ "unsloth/Meta-Llama-3.1-8B-bnb-4bit": None,
43
+ "DevQuasar/HermesNova-Llama-3.1-8B": None,
44
+ "mergekit-community/L3.1-Boshima-b-FIX": None,
45
+ #"chuanli11/Llama-3.2-3B-Instruct-uncensored": None, # Error(s) in loading state_dict for ImageAdapter:\n\tsize mismatch for linear1.weight: copying a param with shape torch.Size([4096, 1152]) from checkpoint, the shape in current model is torch.Size([3072, 1152]).\n\tsize mismatch for linear1.bias: copying a param with shape torch.Size([4096]) from checkpoint,
46
+ "unsloth/Meta-Llama-3.1-8B-Instruct": None,
47
+ }
48
+
49
+ CLIP_PATH = "google/siglip-so400m-patch14-384"
50
+ MODEL_PATH = list(llm_models.keys())[0]
51
+ CHECKPOINT_PATH = BASE_DIR / Path("cgrkzexw-599808")
52
+ LORA_PATH = CHECKPOINT_PATH / "text_model"
53
+ TITLE = "<h1><center>JoyCaption Alpha Two (2024-09-26a)</center></h1>"
54
+ CAPTION_TYPE_MAP = {
55
+ "Descriptive": [
56
+ "Write a descriptive caption for this image in a formal tone.",
57
+ "Write a descriptive caption for this image in a formal tone within {word_count} words.",
58
+ "Write a {length} descriptive caption for this image in a formal tone.",
59
+ ],
60
+ "Descriptive (Informal)": [
61
+ "Write a descriptive caption for this image in a casual tone.",
62
+ "Write a descriptive caption for this image in a casual tone within {word_count} words.",
63
+ "Write a {length} descriptive caption for this image in a casual tone.",
64
+ ],
65
+ "Training Prompt": [
66
+ "Write a Flux prompt for this image.",
67
+ "Write a Flux prompt for this image within {word_count} words.",
68
+ "Write a {length} Flux prompt for this image.",
69
+ ],
70
+ "MidJourney": [
71
+ "Write a MidJourney prompt for this image.",
72
+ "Write a MidJourney prompt for this image within {word_count} words.",
73
+ "Write a {length} MidJourney prompt for this image.",
74
+ ],
75
+ "Booru tag list": [
76
+ "Write a list of Booru tags for this image.",
77
+ "Write a list of Booru tags for this image within {word_count} words.",
78
+ "Write a {length} list of Booru tags for this image.",
79
+ ],
80
+ "Booru-like tag list": [
81
+ "Write a list of Booru-like tags for this image.",
82
+ "Write a list of Booru-like tags for this image within {word_count} words.",
83
+ "Write a {length} list of Booru-like tags for this image.",
84
+ ],
85
+ "Art Critic": [
86
+ "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc.",
87
+ "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it within {word_count} words.",
88
+ "Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it {length}.",
89
+ ],
90
+ "Product Listing": [
91
+ "Write a caption for this image as though it were a product listing.",
92
+ "Write a caption for this image as though it were a product listing. Keep it under {word_count} words.",
93
+ "Write a {length} caption for this image as though it were a product listing.",
94
+ ],
95
+ "Social Media Post": [
96
+ "Write a caption for this image as if it were being used for a social media post.",
97
+ "Write a caption for this image as if it were being used for a social media post. Limit the caption to {word_count} words.",
98
+ "Write a {length} caption for this image as if it were being used for a social media post.",
99
+ ],
100
+ }
101
+
102
+ class ImageAdapter(nn.Module):
103
+ def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
104
+ super().__init__()
105
+ self.deep_extract = deep_extract
106
+
107
+ if self.deep_extract:
108
+ input_features = input_features * 5
109
+
110
+ self.linear1 = nn.Linear(input_features, output_features)
111
+ self.activation = nn.GELU()
112
+ self.linear2 = nn.Linear(output_features, output_features)
113
+ self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
114
+ self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))
115
+
116
+ # Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
117
+ self.other_tokens = nn.Embedding(3, output_features)
118
+ self.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3
119
+
120
+ def forward(self, vision_outputs: torch.Tensor):
121
+ if self.deep_extract:
122
+ x = torch.concat((
123
+ vision_outputs[-2],
124
+ vision_outputs[3],
125
+ vision_outputs[7],
126
+ vision_outputs[13],
127
+ vision_outputs[20],
128
+ ), dim=-1)
129
+ assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}" # batch, tokens, features
130
+ assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
131
+ else:
132
+ x = vision_outputs[-2]
133
+
134
+ x = self.ln1(x)
135
+
136
+ if self.pos_emb is not None:
137
+ assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
138
+ x = x + self.pos_emb
139
+
140
+ x = self.linear1(x)
141
+ x = self.activation(x)
142
+ x = self.linear2(x)
143
+
144
+ # <|image_start|>, IMAGE, <|image_end|>
145
+ other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
146
+ assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
147
+ x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)
148
+
149
+ return x
150
+
151
+ def get_eot_embedding(self):
152
+ return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
153
+
154
+ # https://huggingface.co/docs/transformers/v4.44.2/gguf
155
+ # https://github.com/city96/ComfyUI-GGUF/issues/7
156
+ # https://github.com/THUDM/ChatGLM-6B/issues/18
157
+ # https://github.com/meta-llama/llama/issues/394
158
+ # https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/discussions/109
159
+ # https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
160
+ # https://huggingface.co/google/flan-ul2/discussions/8
161
+ # https://huggingface.co/blog/4bit-transformers-bitsandbytes
162
+ # https://huggingface.co/docs/transformers/main/en/peft
163
+ # https://huggingface.co/docs/transformers/main/en/peft#enable-and-disable-adapters
164
+ # https://huggingface.co/docs/transformers/main/quantization/bitsandbytes?bnb=4-bit
165
+ # https://huggingface.co/lllyasviel/flux1-dev-bnb-nf4
166
+ # https://github.com/huggingface/transformers/issues/28515
167
+ # https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
168
+ tokenizer = None
169
+ text_model_client = None
170
+ text_model = None
171
+ image_adapter = None
172
+ pixtral_model = None
173
+ pixtral_processor = None
174
+ def load_text_model(model_name: str=MODEL_PATH, gguf_file: Union[str, None]=None, is_nf4: bool=True, is_lora: bool=True):
175
+ global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
176
+ try:
177
+ tokenizer = None
178
+ text_model_client = None
179
+ text_model = None
180
+ image_adapter = None
181
+ pixtral_model = None
182
+ pixtral_processor = None
183
+ torch.cuda.empty_cache()
184
+ gc.collect()
185
+ lora_device = "auto"
186
+
187
+ from transformers import BitsAndBytesConfig
188
+ nf4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4",
189
+ bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
190
+
191
+ if model_name in PIXTRAL_PATHS: # Pixtral
192
+ print(f"Loading LLM: {model_name}")
193
+ if is_nf4:
194
+ pixtral_model = LlavaForConditionalGeneration.from_pretrained(model_name, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
195
+ else:
196
+ pixtral_model = LlavaForConditionalGeneration.from_pretrained(model_name, device_map=device, torch_dtype=torch.bfloat16).eval()
197
+ pixtral_processor = AutoProcessor.from_pretrained(model_name)
198
+ print(f"pixtral_model: {type(pixtral_model)}") #
199
+ print(f"pixtral_processor: {type(pixtral_processor)}") #
200
+ return
201
+
202
+ print("Loading tokenizer")
203
+ tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH / "text_model", use_fast=True)
204
+ assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f"Tokenizer is of type {type(tokenizer)}"
205
+
206
+ print(f"Loading LLM: {model_name}")
207
+ if gguf_file:
208
+ if device == "cpu":
209
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=device, torch_dtype=torch.bfloat16).eval()
210
+ elif is_nf4:
211
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
212
+ else:
213
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=lora_device, torch_dtype=torch.bfloat16).eval()
214
+ else:
215
+ if device == "cpu":
216
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file, device_map=device, torch_dtype=torch.bfloat16).eval()
217
+ elif is_nf4:
218
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
219
+ else:
220
+ text_model = AutoModelForCausalLM.from_pretrained(model_name, device_map=lora_device, torch_dtype=torch.bfloat16).eval()
221
+
222
+ if is_lora and LORA_PATH.exists() and not is_nf4:
223
+ print("Loading VLM's custom text model")
224
+ if is_nf4: # omitted
225
+ text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device, quantization_config=nf4_config)
226
+ else:
227
+ text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device)
228
+ text_model = text_model.merge_and_unload(safe_merge=True) # to avoid PEFT bug https://github.com/huggingface/transformers/issues/28515
229
+ else: print("VLM's custom text model is not loaded")
230
+
231
+ print("Loading image adapter")
232
+ image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False).eval().to("cpu")
233
+ image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu", weights_only=False))
234
+ image_adapter.eval().to(device)
235
+ except Exception as e:
236
+ print(f"LLM load error: {e}")
237
+ raise Exception(f"LLM load error: {e}") from e
238
+ finally:
239
+ torch.cuda.empty_cache()
240
+ gc.collect()
241
+
242
+ load_text_model.zerogpu = True
243
+
244
+ # Load CLIP
245
+ print("Loading CLIP")
246
+ clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
247
+ clip_model = AutoModel.from_pretrained(CLIP_PATH).vision_model
248
+ assert (CHECKPOINT_PATH / "clip_model.pt").exists()
249
+ if (CHECKPOINT_PATH / "clip_model.pt").exists():
250
+ print("Loading VLM's custom vision model")
251
+ checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu', weights_only=False)
252
+ checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
253
+ clip_model.load_state_dict(checkpoint)
254
+ del checkpoint
255
+ clip_model.eval().requires_grad_(False).to(device)
256
+
257
+ # Tokenizer
258
+ # LLM
259
+ # Image Adapter
260
+ #load_text_model(PIXTRAL_PATHS[0])
261
+ #print(f"pixtral_model: {type(pixtral_model)}") #
262
+ #print(f"pixtral_processor: {type(pixtral_processor)}") #
263
+ load_text_model(MODEL_PATH, None, LOAD_IN_NF4, True)
264
+ #print(f"pixtral_model: {type(pixtral_model)}") #
265
+ #print(f"pixtral_processor: {type(pixtral_processor)}") #
266
+
267
+ @spaces.GPU
268
+ @torch.inference_mode()
269
+ def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str], name_input: str, custom_prompt: str,
270
+ max_new_tokens: int=300, top_p: float=0.9, temperature: float=0.6, model_name: str=MODEL_PATH, progress=gr.Progress(track_tqdm=True)) -> tuple[str, str]:
271
+ global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
272
+ torch.cuda.empty_cache()
273
+ gc.collect()
274
+
275
+ # 'any' means no length specified
276
+ length = None if caption_length == "any" else caption_length
277
+
278
+ if isinstance(length, str):
279
+ try:
280
+ length = int(length)
281
+ except ValueError:
282
+ pass
283
+
284
+ # Build prompt
285
+ if length is None:
286
+ map_idx = 0
287
+ elif isinstance(length, int):
288
+ map_idx = 1
289
+ elif isinstance(length, str):
290
+ map_idx = 2
291
+ else:
292
+ raise ValueError(f"Invalid caption length: {length}")
293
+
294
+ prompt_str = CAPTION_TYPE_MAP[caption_type][map_idx]
295
+
296
+ # Add extra options
297
+ if len(extra_options) > 0:
298
+ prompt_str += " " + " ".join(extra_options)
299
+
300
+ # Add name, length, word_count
301
+ prompt_str = prompt_str.format(name=name_input, length=caption_length, word_count=caption_length)
302
+
303
+ if custom_prompt.strip() != "":
304
+ prompt_str = custom_prompt.strip()
305
+
306
+ # For debugging
307
+ print(f"Prompt: {prompt_str}")
308
+
309
+ # Pixtral
310
+ if model_name in PIXTRAL_PATHS:
311
+ print(f"pixtral_model: {type(pixtral_model)}") #
312
+ print(f"pixtral_processor: {type(pixtral_processor)}") #
313
+ input_images = [input_image.convert("RGB")]
314
+ input_prompt = "[INST]Caption this image:\n[IMG][/INST]"
315
+ inputs = pixtral_processor(images=input_images, text=input_prompt, return_tensors="pt").to(device)
316
+ generate_ids = pixtral_model.generate(**inputs, max_new_tokens=max_new_tokens)
317
+ output = pixtral_processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
318
+ return input_prompt, output.strip()
319
+
320
+ # Preprocess image
321
+ # NOTE: I found the default processor for so400M to have worse results than just using PIL directly
322
+ #image = clip_processor(images=input_image, return_tensors='pt').pixel_values
323
+ image = input_image.resize((384, 384), Image.LANCZOS)
324
+ pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
325
+ pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
326
+ pixel_values = pixel_values.to(device)
327
+
328
+ # Embed image
329
+ # This results in Batch x Image Tokens x Features
330
+ with torch.amp.autocast_mode.autocast(device, enabled=True):
331
+ vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
332
+ image_features = vision_outputs.hidden_states
333
+ embedded_images = image_adapter(image_features)
334
+ embedded_images = embedded_images.to(device)
335
+
336
+ # Build the conversation
337
+ convo = [
338
+ {
339
+ "role": "system",
340
+ "content": "You are a helpful image captioner.",
341
+ },
342
+ {
343
+ "role": "user",
344
+ "content": prompt_str,
345
+ },
346
+ ]
347
+
348
+ # Format the conversation
349
+ convo_string = tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = True)
350
+ assert isinstance(convo_string, str)
351
+
352
+ # Tokenize the conversation
353
+ # prompt_str is tokenized separately so we can do the calculations below
354
+ convo_tokens = tokenizer.encode(convo_string, return_tensors="pt", add_special_tokens=False, truncation=False)
355
+ prompt_tokens = tokenizer.encode(prompt_str, return_tensors="pt", add_special_tokens=False, truncation=False)
356
+ assert isinstance(convo_tokens, torch.Tensor) and isinstance(prompt_tokens, torch.Tensor)
357
+ convo_tokens = convo_tokens.squeeze(0) # Squeeze just to make the following easier
358
+ prompt_tokens = prompt_tokens.squeeze(0)
359
+
360
+ # Calculate where to inject the image
361
+ eot_id_indices = (convo_tokens == tokenizer.convert_tokens_to_ids("<|eot_id|>")).nonzero(as_tuple=True)[0].tolist()
362
+ assert len(eot_id_indices) == 2, f"Expected 2 <|eot_id|> tokens, got {len(eot_id_indices)}"
363
+
364
+ preamble_len = eot_id_indices[1] - prompt_tokens.shape[0] # Number of tokens before the prompt
365
+
366
+ # Embed the tokens
367
+ convo_embeds = text_model.model.embed_tokens(convo_tokens.unsqueeze(0).to(device))
368
+
369
+ # Construct the input
370
+ input_embeds = torch.cat([
371
+ convo_embeds[:, :preamble_len], # Part before the prompt
372
+ embedded_images.to(dtype=convo_embeds.dtype), # Image
373
+ convo_embeds[:, preamble_len:], # The prompt and anything after it
374
+ ], dim=1).to(device)
375
+
376
+ input_ids = torch.cat([
377
+ convo_tokens[:preamble_len].unsqueeze(0),
378
+ torch.zeros((1, embedded_images.shape[1]), dtype=torch.long), # Dummy tokens for the image (TODO: Should probably use a special token here so as not to confuse any generation algorithms that might be inspecting the input)
379
+ convo_tokens[preamble_len:].unsqueeze(0),
380
+ ], dim=1).to(device)
381
+ attention_mask = torch.ones_like(input_ids)
382
+
383
+ # Debugging
384
+ #print(f"Input to model: {repr(tokenizer.decode(input_ids[0]))}")
385
+
386
+ text_model.to(device)
387
+ generate_ids = text_model.generate(input_ids, inputs_embeds=input_embeds, attention_mask=attention_mask, max_new_tokens=max_new_tokens,
388
+ do_sample=True, suppress_tokens=None, top_p=top_p, temperature=temperature)
389
+
390
+ # Trim off the prompt
391
+ generate_ids = generate_ids[:, input_ids.shape[1]:]
392
+ if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
393
+ generate_ids = generate_ids[:, :-1]
394
+
395
+ caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
396
+
397
+ return prompt_str, caption.strip()
398
+
399
+
400
+ # https://huggingface.co/docs/transformers/v4.44.2/main_classes/text_generation#transformers.FlaxGenerationMixin.generate
401
+ # https://github.com/huggingface/transformers/issues/6535
402
+ # https://zenn.dev/hijikix/articles/8c445f4373fdcc ja
403
+ # https://github.com/ggerganov/llama.cpp/discussions/7712
404
+ # https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility
405
+ # https://huggingface.co/docs/huggingface_hub/v0.24.6/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation
406
+
407
+
408
+ def is_repo_name(s):
409
+ import re
410
+ return re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', s)
411
+
412
+
413
+ def is_repo_exists(repo_id):
414
+ try:
415
+ api = HfApi(token=HF_TOKEN)
416
+ if api.repo_exists(repo_id=repo_id): return True
417
+ else: return False
418
+ except Exception as e:
419
+ print(f"Error: Failed to connect {repo_id}. {e}")
420
+ return True # for safe
421
+
422
+
423
+ def is_valid_repo(repo_id):
424
+ import re
425
+ try:
426
+ if not re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', repo_id): return False
427
+ api = HfApi()
428
+ if api.repo_exists(repo_id=repo_id): return True
429
+ else: return False
430
+ except Exception as e:
431
+ print(f"Failed to connect {repo_id}. {e}")
432
+ return False
433
+
434
+
435
+ def get_text_model():
436
+ return list(llm_models.keys())
437
+
438
+
439
+ def is_gguf_repo(repo_id: str):
440
+ try:
441
+ api = HfApi(token=HF_TOKEN)
442
+ if not is_repo_name(repo_id) or not is_repo_exists(repo_id): return False
443
+ files = api.list_repo_files(repo_id=repo_id)
444
+ except Exception as e:
445
+ print(f"Error: Failed to get {repo_id}'s info. {e}")
446
+ gr.Warning(f"Error: Failed to get {repo_id}'s info. {e}")
447
+ return False
448
+ files = [f for f in files if f.endswith(".gguf")]
449
+ if len(files) == 0: return False
450
+ else: return True
451
+
452
+
453
+ def get_repo_gguf(repo_id: str):
454
+ try:
455
+ api = HfApi(token=HF_TOKEN)
456
+ if not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(value="", choices=[])
457
+ files = api.list_repo_files(repo_id=repo_id)
458
+ except Exception as e:
459
+ print(f"Error: Failed to get {repo_id}'s info. {e}")
460
+ gr.Warning(f"Error: Failed to get {repo_id}'s info. {e}")
461
+ return gr.update(value="", choices=[])
462
+ files = [f for f in files if f.endswith(".gguf")]
463
+ if len(files) == 0: return gr.update(value="", choices=[])
464
+ else: return gr.update(value=files[0], choices=files)
465
+
466
+
467
+ @spaces.GPU
468
+ def change_text_model(model_name: str=MODEL_PATH, use_client: bool=False, gguf_file: Union[str, None]=None,
469
+ is_nf4: bool=True, is_lora: bool=True, progress=gr.Progress(track_tqdm=True)):
470
+ global use_inference_client, llm_models
471
+ use_inference_client = use_client
472
+ try:
473
+ if not is_repo_name(model_name) or not is_repo_exists(model_name):
474
+ raise gr.Error(f"Repo doesn't exist: {model_name}")
475
+ if not gguf_file and is_gguf_repo(model_name):
476
+ gr.Info(f"Please select a gguf file.")
477
+ return gr.update(visible=True)
478
+ if use_inference_client:
479
+ pass #
480
+ else:
481
+ load_text_model(model_name, gguf_file, is_nf4, is_lora)
482
+ if model_name not in llm_models: llm_models[model_name] = gguf_file if gguf_file else None
483
+ return gr.update(choices=get_text_model())
484
+ except Exception as e:
485
+ raise gr.Error(f"Model load error: {model_name}, {e}")
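For reference, a minimal usage sketch of the stream_chat_mod function defined above (not part of this commit). It assumes the file is importable as a module named joycaption, that its module-level CLIP and LLM loading has completed, and that an image named example.jpg exists; the module name and image path are placeholders.

# Hypothetical usage sketch; module name and image path are assumptions.
from PIL import Image
import joycaption  # assumes joycaption.py is on the import path

image = Image.open("example.jpg")  # placeholder input image

# caption_type must be a key of CAPTION_TYPE_MAP; caption_length may be
# "any", a word count such as "50", or a length word used by the prompt map.
prompt_used, caption = joycaption.stream_chat_mod(
    input_image=image,
    caption_type="Descriptive",
    caption_length="any",
    extra_options=[],
    name_input="",
    custom_prompt="",
    max_new_tokens=300,
    top_p=0.9,
    temperature=0.6,
)
print(prompt_used)  # the prompt actually sent to the captioner
print(caption)      # the generated caption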