mrbeliever committed
Commit 190ad42 · verified · 1 Parent(s): 16e4b7a

Update app.py

Files changed (1)
  1. app.py +48 -37
app.py CHANGED
@@ -3,67 +3,78 @@ import spaces
 from transformers import AutoModelForCausalLM, AutoProcessor
 import torch
 from PIL import Image
-import subprocess
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

+# Load model and processor
 models = {
     "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
-
 }

 processors = {
     "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
 }

-DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"
-
-kwargs = {}
-kwargs['torch_dtype'] = torch.bfloat16
-
-user_prompt = '<|user|>\n'
-assistant_prompt = '<|assistant|>\n'
-prompt_suffix = "<|end|>\n"
+# Set the default query
+DEFAULT_QUERY = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."

 @spaces.GPU
-def run_example(image, text_input=None, model_id="microsoft/Phi-3.5-vision-instruct"):
+def run_example(image, model_id="microsoft/Phi-3.5-vision-instruct"):
     model = models[model_id]
     processor = processors[model_id]

-    prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
+    # Use the default query directly without a user input text field
+    prompt = f"<|user|>\n<|image_1|>\n{DEFAULT_QUERY}<|end|>\n<|assistant|>"
     image = Image.fromarray(image).convert("RGB")

     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-    generate_ids = model.generate(**inputs,
-                                  max_new_tokens=1000,
-                                  eos_token_id=processor.tokenizer.eos_token_id,
-                                  )
+    generate_ids = model.generate(**inputs, max_new_tokens=1000, eos_token_id=processor.tokenizer.eos_token_id)
     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-    response = processor.batch_decode(generate_ids,
-                                      skip_special_tokens=True,
-                                      clean_up_tokenization_spaces=False)[0]
+    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     return response

 css = """
-#output {
-    height: 500px;
-    overflow: auto;
-    border: 1px solid #ccc;
+#container {
+    background-color: #f9f9f9;
+    padding: 20px;
+    border-radius: 15px;
+    border: 2px solid #333;
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
+    max-width: 450px;
+    margin: auto;
+}
+#input_image {
+    margin-top: 15px;
+    border: 2px solid #333;
+    border-radius: 8px;
+    height: 180px;
+    object-fit: contain;
+}
+#output_text {
+    margin-top: 15px;
+    border: 2px solid #333;
+    border-radius: 8px;
+    height: 180px;
+    overflow-y: auto;
+}
+#submit_btn {
+    background-color: #fff;
+    color: black;
+    border-radius: 10px;
+    padding: 10px;
+    cursor: pointer;
+    transition: background-color 0.3s ease;
+    margin-top: 15px;
+}
+#submit_btn:hover {
+    background-color: #333;
 }
 """

 with gr.Blocks(css=css) as demo:
-    gr.Markdown(DESCRIPTION)
-    with gr.Tab(label="Phi-3.5 Input"):
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="Input Picture")
-                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct")
-                text_input = gr.Textbox(label="Question")
-                submit_btn = gr.Button(value="Submit")
-            with gr.Column():
-                output_text = gr.Textbox(label="Output Text")
-
-        submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
-
-demo.queue(api_open=False)
-demo.launch(debug=True, show_api=False)
+    with gr.Column(elem_id="container"):
+        input_image = gr.Image(type="pil", label="Upload Image", elem_id="input_image")
+        submit_btn = gr.Button(value="Generate Prompt", elem_id="submit_btn")
+        output_text = gr.Textbox(label="Prompt Output", elem_id="output_text", show_copy_button=True, lines=6)

+    submit_btn.click(run_example, [input_image], output_text)

+demo.launch(share=False)
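
A side effect worth flagging: the commit removes the runtime pip install of flash-attn but keeps _attn_implementation="flash_attention_2" in from_pretrained, so the Space now assumes flash-attn is already present in the environment (e.g. via requirements.txt); if it is missing, model loading raises an ImportError. A minimal defensive-loading sketch, assuming you would rather degrade than crash (hypothetical, not part of this commit):

# Hypothetical fallback load (not in commit 190ad42): try flash-attn's
# kernel first, then fall back to the default "eager" implementation.
from transformers import AutoModelForCausalLM

def load_phi_vision(attn="flash_attention_2"):
    return AutoModelForCausalLM.from_pretrained(
        "microsoft/Phi-3.5-vision-instruct",
        trust_remote_code=True,
        torch_dtype="auto",
        _attn_implementation=attn,
    ).cuda().eval()

try:
    model = load_phi_vision()              # fast path, needs flash-attn installed
except ImportError:
    model = load_phi_vision(attn="eager")  # portable fallback, no flash-attn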
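
Also note the input-type change: the new gr.Image(type="pil") hands run_example a PIL.Image.Image, while the unchanged body still calls Image.fromarray(image), which was written for the NumPy arrays the old default component delivered. The call usually still succeeds because PIL images export the NumPy array interface, but the round-trip is redundant and can lose palette information. A small normalization helper (a sketch; to_rgb_pil is a hypothetical name, not in the commit):

import numpy as np
from PIL import Image

def to_rgb_pil(image):
    # Hypothetical helper: accept either a NumPy array (gr.Image default)
    # or a PIL image (gr.Image(type="pil")) and normalize to RGB PIL.
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)  # NumPy path
    return image.convert("RGB")         # PIL path: skip the fromarray round-trip

Inside run_example, the line image = Image.fromarray(image).convert("RGB") would then become image = to_rgb_pil(image).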
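
Finally, a quick smoke test of the new zero-input flow (a sketch assuming a CUDA machine with the model already downloaded; "sample.jpg" is a placeholder path):

from PIL import Image

img = Image.open("sample.jpg").convert("RGB")  # any RGB test image
print(run_example(img))                        # prints the generated prompt text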