import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch
from PIL import Image
import subprocess
from datetime import datetime
import numpy as np
import os

# Optional: install FlashAttention 2 on the Space for faster attention kernels.
# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

# models = {
#     "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
# }


def array_to_image_path(image_array):
    # Convert the numpy array to a PIL Image and cap its size at 1024x1024
    img = Image.fromarray(np.uint8(image_array))
    img.thumbnail((1024, 1024))

    # Generate a unique filename using a timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"

    # Save the image and return its absolute path
    img.save(filename)
    full_path = os.path.abspath(filename)
    return full_path


# Load the model in eval mode on the GPU, plus its matching processor
models = {
    "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained(
        "Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto"
    ).cuda().eval()
}

processors = {
    "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained(
        "Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True
    )
}

DESCRIPTION = "This demo uses [Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"


@spaces.GPU
def run_example(image, model_id="Qwen/Qwen2-VL-7B-Instruct"):
    text_input = "Convert the image to text."
    image_path = array_to_image_path(image)
    print(image_path)

    model = models[model_id]
    processor = processors[model_id]

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": text_input},
            ],
        }
    ]

    # Preparation for inference: render the chat template and collect the vision inputs
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: generate, then strip the prompt tokens from each output sequence
    generated_ids = model.generate(**inputs, max_new_tokens=1024)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return output_text[0]
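# A quick way to sanity-check run_example outside the UI is to call it directly
# on a numpy image. This is a hypothetical local smoke test, not part of the demo;
# it assumes a CUDA GPU and reuses the bundled "Caracal.jpg" header image.
#
#   test_array = np.array(Image.open("Caracal.jpg").convert("RGB"))
#   print(run_example(test_array))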
css = """
/* Overall app styling */
.gradio-container {
    max-width: 1200px !important;
    margin: 0 auto;
    padding: 20px;
    background-color: #f8f9fa;
}

/* Tabs styling */
.tabs {
    border-radius: 8px;
    background: white;
    padding: 20px;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
}

/* Input/Output containers */
.input-container, .output-container {
    background: white;
    border-radius: 8px;
    padding: 15px;
    margin: 10px 0;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
}

/* Button styling */
.submit-btn {
    background-color: #2d31fa !important;
    border: none !important;
    padding: 8px 20px !important;
    border-radius: 6px !important;
    color: white !important;
    transition: all 0.3s ease !important;
}
.submit-btn:hover {
    background-color: #1f24c7 !important;
    transform: translateY(-1px);
}

/* Output text area */
#output {
    height: 500px;
    overflow: auto;
    border: 1px solid #e0e0e0;
    border-radius: 6px;
    padding: 15px;
    background: #ffffff;
    font-family: 'Arial', sans-serif;
}

/* Dropdown styling */
.gr-dropdown {
    border-radius: 6px !important;
    border: 1px solid #e0e0e0 !important;
}

/* Image upload area */
.gr-image-input {
    border: 2px dashed #ccc;
    border-radius: 8px;
    padding: 20px;
    transition: all 0.3s ease;
}
.gr-image-input:hover {
    border-color: #2d31fa;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Image("Caracal.jpg", interactive=False)
    gr.Markdown(DESCRIPTION)
    with gr.Tab(label="Image Input", elem_classes="tabs"):
        with gr.Row():
            with gr.Column(elem_classes="input-container"):
                input_img = gr.Image(label="Input Picture", elem_classes="gr-image-input")
                model_selector = gr.Dropdown(
                    choices=list(models.keys()),
                    label="Model",
                    value="Qwen/Qwen2-VL-7B-Instruct",
                    elem_classes="gr-dropdown",
                )
                submit_btn = gr.Button(value="Submit", elem_classes="submit-btn")
            with gr.Column(elem_classes="output-container"):
                output_text = gr.Textbox(label="Output Text", elem_id="output")

        submit_btn.click(run_example, [input_img, model_selector], [output_text])

demo.queue(api_open=False)
demo.launch(debug=True)
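# Note: queue(api_open=False) keeps the request queue enabled but closes the
# programmatic API routes, so clients can only interact through the web UI.
# launch(debug=True) blocks the main thread and streams errors to the console;
# run locally, Gradio serves the app at http://127.0.0.1:7860 by default.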