Update joycaption.py
joycaption.py  +3 −1
@@ -264,8 +264,10 @@ load_text_model(MODEL_PATH, None, LOAD_IN_NF4, True)
 #print(f"pixtral_model: {type(pixtral_model)}") #
 #print(f"pixtral_processor: {type(pixtral_processor)}") #
 
-
+
+@spaces.GPU()
 @torch.inference_mode()
+@demo.queue()
 def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str], name_input: str, custom_prompt: str,
                     max_new_tokens: int=300, top_p: float=0.9, temperature: float=0.6, model_name: str=MODEL_PATH, progress=gr.Progress(track_tqdm=True)) -> tuple[str, str]:
     global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
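For context, this matches the usual ZeroGPU pattern on Hugging Face Spaces: spaces.GPU() sits outermost so each call is scheduled onto a GPU slot, while torch.inference_mode() disables autograd bookkeeping for the duration of the call. Below is a minimal, self-contained sketch of that pattern; the generate function and its Gradio wiring are hypothetical stand-ins, not code from joycaption.py. (Note that demo.queue() is ordinarily called on the Blocks app itself, as in the sketch, rather than applied as a decorator.)

# Minimal sketch of the ZeroGPU decorator pattern (assumed setup,
# not the actual joycaption.py app).
import gradio as gr
import spaces   # Hugging Face ZeroGPU helper
import torch

@spaces.GPU()            # outermost: request a GPU slot per call
@torch.inference_mode()  # no autograd overhead during inference
def generate(prompt: str) -> str:
    # Hypothetical stand-in for stream_chat_mod: a real app would run
    # its model on CUDA here while the spaces.GPU() lease is active.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"[{device}] {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Caption")
    prompt.submit(generate, inputs=prompt, outputs=output)

demo.queue()   # enable request queueing on the app
demo.launch()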