mrbeliever committed
Commit 352c3f8 · verified · 1 Parent(s): 2c8fc65

Update app.py

Files changed (1): app.py (+45, −72)
app.py CHANGED
@@ -1,100 +1,73 @@
import gradio as gr
+ import spaces
from transformers import AutoModelForCausalLM, AutoProcessor
import torch
from PIL import Image
+ import subprocess
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

- # Model and Processor Initialization
models = {
- "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained(
-     "microsoft/Phi-3.5-vision-instruct",
-     trust_remote_code=True,
-     torch_dtype="auto",
-     _attn_implementation="flash_attention_2"
- ).cuda().eval()
+ "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
}

processors = {
    "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
}

- # Default question
- default_question = (
-     "You are an image-to-prompt converter. Your work is to observe each and every detail of the image and "
-     "craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, "
-     "and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera "
-     "and lens settings, lighting, colors, effects, texture, background, rendering]."
- )
+ DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"

- # Function to generate prompt
- def generate_caption(image):
-     model = models["microsoft/Phi-3.5-vision-instruct"]
-     processor = processors["microsoft/Phi-3.5-vision-instruct"]
+ kwargs = {}
+ kwargs['torch_dtype'] = torch.bfloat16

-     prompt = f"<|user|>\n<|image_1|>\n{default_question}<|end|>\n<|assistant|>\n"
+ user_prompt = '<|user|>\n'
+ assistant_prompt = '<|assistant|>\n'
+ prompt_suffix = "<|end|>\n"
+
+ default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
+
+ @spaces.GPU
+ def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
+     model = models[model_id]
+     processor = processors[model_id]
+
+     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
    image = Image.fromarray(image).convert("RGB")

    inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-     generate_ids = model.generate(
-         **inputs,
-         max_new_tokens=1000,
-         eos_token_id=processor.tokenizer.eos_token_id,
-     )
+     generate_ids = model.generate(**inputs,
+                                   max_new_tokens=1000,
+                                   eos_token_id=processor.tokenizer.eos_token_id,
+                                   )
    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-     response = processor.batch_decode(
-         generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
-     )[0]
+     response = processor.batch_decode(generate_ids,
+                                       skip_special_tokens=True,
+                                       clean_up_tokenization_spaces=False)[0]
    return response

- # Enhanced CSS for streamlined UI
css = """
- #container {
-     background-color: #f9f9f9;
-     padding: 20px;
-     border-radius: 15px;
-     border: 2px solid #333;
-     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
-     max-width: 450px;
-     margin: auto;
- }
- #input_image {
-     margin-top: 15px;
-     border: 2px solid #333;
-     border-radius: 8px;
-     height: 180px;
-     object-fit: contain;
- }
- #output_caption {
-     margin-top: 15px;
-     border: 2px solid #333;
-     border-radius: 8px;
-     height: 180px;
-     overflow-y: auto;
- }
- #run_button {
-     background-color: #fff;
-     color: black;
-     border-radius: 10px;
-     padding: 10px;
-     cursor: pointer;
-     transition: background-color 0.3s ease;
-     margin-top: 15px;
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
}
- #run_button:hover {
-     background-color: #333;
+ #model_selector, #text_input {
+     display: none !important;
}
"""

with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="container"):
-         input_image = gr.Image(type="pil", elem_id="input_image", label="Upload Image")
-         run_button = gr.Button(value="Generate Prompt", elem_id="run_button")
-         output_caption = gr.Textbox(label="Generated Prompt", show_copy_button=True, elem_id="output_caption", lines=6)
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="Phi-3.5 Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
+                 text_input = gr.Textbox(label="Question", value=default_question, visible=False)
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")

-     run_button.click(
-         fn=generate_caption,
-         inputs=[input_image],
-         outputs=output_caption,
-     )
+     submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])

- demo.launch(share=False)
+ demo.queue(api_open=False)
+ demo.launch(debug=True, show_api=False)
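
For reference, the inference path the new app.py settles on — load Phi-3.5-vision, assemble the <|user|>/<|image_1|> chat prompt, generate, then decode only the newly generated tokens — can be exercised outside the Space. The sketch below is a minimal standalone version using the same calls as this commit; the "eager" attention fallback and the example.jpg input are assumptions for a plain local setup without flash-attn or the spaces package, not part of the commit.

# Minimal local sketch of the inference path in the updated app.py.
# Assumes a CUDA GPU; "eager" attention and example.jpg are stand-ins.
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3.5-vision-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype="auto",
    _attn_implementation="eager",  # "flash_attention_2" if flash-attn is installed
).cuda().eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Chat template assembled exactly as run_example() does it.
question = "Describe every detail of this image."
prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n"
image = Image.open("example.jpg").convert("RGB")

inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
generate_ids = model.generate(
    **inputs,
    max_new_tokens=1000,
    eos_token_id=processor.tokenizer.eos_token_id,
)
# Slice off the prompt tokens so only the model's reply is decoded.
generate_ids = generate_ids[:, inputs["input_ids"].shape[1]:]
response = processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]
print(response)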